//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUISelDAGToDAG.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPURegBankSelect.h"
#include "AMDGPUSplitModule.h"
#include "GCNSchedStrategy.h"
#include "GCNVOPDUtils.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIFixSGPRCopies.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const MachineRegisterInfo &MRI,
                              const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
}

/// -{sgpr|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory that indicates whether the register allocator
/// has been overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

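// Example (illustrative, not part of this file): the two allocators can be
// selected independently, e.g.
//   llc -mtriple=amdgcn -sgpr-regalloc=greedy -vgpr-regalloc=fast ...
// By default the choice follows the -O level via the "default" entries above.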

static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
  "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
  "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
  "fast", "fast register allocator", createFastSGPRRegisterAllocator);


static VGPRRegisterRegAlloc basicRegAllocVGPR(
  "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
  "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
  "fast", "fast register allocator", createFastVGPRRegisterAllocator);
} // anonymous namespace

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

// Disable structurizer-based control-flow lowering in order to test convergence
// control tokens. This should eventually be replaced by the wave-transform.
static cl::opt<bool, true> DisableStructurizer(
    "amdgpu-disable-structurizer",
    cl::desc("Disable structurizer for experiments; produces unusable code"),
    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
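// Example (illustrative): `llc -mtriple=amdgcn
// -amdgpu-atomic-optimizer-strategy=DPP ...` selects the DPP-based scan
// lowering; "None" disables the atomic optimizer entirely.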

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Enable GFX11.5+ s_singleuse_vdst insertion
static cl::opt<bool>
    EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
                              cl::desc("Enable s_singleuse_vdst insertion"),
                              cl::init(false), cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool, true> EnableStructurizerWorkarounds(
    "amdgpu-enable-structurizer-workarounds",
    cl::desc("Enable workarounds for the StructurizeCFG pass"),
    cl::location(AMDGPUTargetMachine::EnableStructurizerWorkarounds),
    cl::init(true), cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<bool> EnableMaxIlpSchedStrategy(
    "amdgpu-enable-max-ilp-scheduling-strategy",
    cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
    "amdgpu-enable-hipstdpar",
    cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
    cl::Hidden);

static cl::opt<bool>
    EnableAMDGPUAttributor("amdgpu-attributor-enable",
                           cl::desc("Enable AMDGPUAttributorPass"),
                           cl::init(true), cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  // The remainder of this function registers every AMDGPU backend pass with
  // the legacy PassRegistry via a long sequence of initialize*Pass(*PR) calls.
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);
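// Example (illustrative): any registered strategy can be forced from the
// command line, e.g. `llc -mtriple=amdgcn -misched=gcn-max-ilp ...`;
// otherwise createMachineScheduler() below picks one per subtarget and flags.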

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
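  // For example (illustrative reading of one component): "p7:160:256:256:32"
  // declares address space 7 pointers as 160 bits in size with 256-bit ABI
  // and preferred alignment, and a 32-bit index width for GEP arithmetic.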
572 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
573 "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
574 "v32:32-v48:64-v96:"
575 "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
576 "G1-ni:7:8:9";
577}

static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT,
                                               StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}
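// For instance (illustrative): an amdgpu_kernel entry point is always kept,
// while an internal helper whose only users are dead constants may be
// internalized and later removed by GlobalDCE.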

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;
  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases("iterative", "", ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}
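// Example (illustrative, new pass manager syntax):
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' ...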

Expected<AMDGPUAttributorOptions>
parseAMDGPUAttributorPassOptions(StringRef Params) {
  AMDGPUAttributorOptions Result;
  while (!Params.empty()) {
    StringRef ParamName;
    std::tie(ParamName, Params) = Params.split(';');
    if (ParamName == "closed-world") {
      Result.IsClosedWorld = true;
    } else {
      return make_error<StringError>(
          formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
              .str(),
          inconvertibleErrorCode());
    }
  }
  return Result;
}
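// Example (illustrative): `opt -passes='amdgpu-attributor<closed-world>'`
// enables the closed-world assumption for whole-program attribute inference.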

void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {

#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        FunctionPassManager FPM;
        PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
        if (EnableHipStdPar)
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());

        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  // FIXME: Why is AMDGPUAttributor not in CGSCC?
  PB.registerOptimizerLastEPCallback(
      [this](ModulePassManager &MPM, OptimizationLevel Level) {
        if (Level != OptimizationLevel::O0) {
          MPM.addPass(AMDGPUAttributorPass(*this));
        }
      });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
        if (Level != OptimizationLevel::O0 && EnableAMDGPUAttributor)
          PM.addPass(AMDGPUAttributorPass(*this));
      });

  PB.registerRegClassFilterParsingCallback(
      [](StringRef FilterName) -> RegAllocFilterFunc {
        if (FilterName == "sgpr")
          return onlyAllocateSGPRs;
        if (FilterName == "vgpr")
          return onlyAllocateVGPRs;
        return nullptr;
      });
}

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}
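// In other words (illustrative): for the 32-bit local/private/region spaces
// the all-ones pattern encodes null, since address 0 is a valid allocatable
// location there; the flat/global/constant spaces keep the usual 0.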

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->isPointerTy() &&
         V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}
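// Matched IR shape (illustrative):
//   %s  = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %ns = xor i1 %s, true
//   %pr = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %np = xor i1 %pr, true
//   %g  = and i1 %ns, %np   ; when %g holds, %p can be treated as global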

unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
  // FIXME(?): Would be better to use an already existing Analysis/PassManager,
  // but all current users of this API don't have one ready and would need to
  // create one anyway. Let's hide the boilerplate for now to keep it simple.

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB(this);
  PB.registerModuleAnalyses(MAM);
  PB.registerFunctionAnalyses(FAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
  MPM.run(M, MAM);
  return true;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

Error GCNTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
        C, std::make_unique<PostGenericScheduler>(C),
        /*RemoveKillFlags=*/true);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.shouldClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    DAG->addMutation(
        createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
    if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
      DAG->addMutation(createVOPDPairingMutation());
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&GCLoweringID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  Triple::ArchType Arch = TM.getTargetTriple().getArch();
  if (RemoveIncompatibleFunctions && Arch == Triple::amdgcn)
    addPass(createAMDGPURemoveIncompatibleFunctionsPass(&TM));

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // This can be disabled by passing ::Disable here or on the command line
  // with --expand-variadics-override=disable.
  addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (Arch == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createInferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    //
    // FIXME: This should ideally be put after the LoadStoreVectorizer.
    // However, due to some annoying facts about ResourceUsageAnalysis,
    // (especially as exercised in the resource-usage-dead-function test),
    // we need all the function passes from codegenprepare all the way through
    // said resource usage analysis to run on the call graph produced
    // before codegenprepare runs (because codegenprepare will knock some
    // nodes out of the graph, which leads to function-level passes not
    // being run on them, which causes crashes in the resource usage analysis).
    addPass(createAMDGPULowerBufferFatPointersPass());
    // In accordance with the above FIXME, manually force all the
    // function-level passes into a CGSCCPassManager.
    addPass(new DummyCGSCCPass());
  }

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // LowerSwitch pass may introduce unreachable blocks that can
  // cause unexpected behavior for subsequent passes. Placing it
  // here seems better, as these blocks would get cleaned up by
  // UnreachableBlockElim inserted next in the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

MachineFunctionInfo *R600TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return R600MachineFunctionInfo::create<R600MachineFunctionInfo>(
      Allocator, F, static_cast<const R600Subtarget *>(STI));
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  if (EnableMaxIlpSchedStrategy)
    return createGCNMaxILPMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPrepareLegacyPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize && !DisableStructurizer) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createAMDGPUAnnotateUniformValuesLegacy());
  if (!LateCFGStructurize && !DisableStructurizer) {
    addPass(createSIAnnotateControlFlowLegacyPass());
    // TODO: Move this right after structurizeCFG to avoid extra divergence
    // analysis. This depends on stopping SIAnnotateControlFlow from making
    // control flow modifications.
    addPass(createAMDGPURewriteUndefForPHILegacyPass());
  }
  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisLegacyID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&SILoadStoreOptimizerID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesLegacyID);
  addPass(createSILowerI1CopiesLegacyPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new AMDGPURegBankSelect());
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&MachineSchedulerID, &GCNPreRAOptimizationsID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // FIXME: when an instruction has a Killed operand and the instruction is
  // inside a bundle, it seems only the BUNDLE instruction appears as the Kills
  // of the register in LiveVariables; this would trigger a failure in the
  // verifier. We should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeLegacyID);
  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  addPass(&SILowerWWMCopiesID);
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(false));

  addPass(&SILowerWWMCopiesID);
  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  addPass(&AMDGPUMarkLastScratchLoadID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsPass());
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertSingleUseVDSTID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         std::nullopt, std::nullopt);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;

  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  return false;
}
unsigned const MachineRegisterInfo * MRI
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static cl::opt< bool, true > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::location(AMDGPUTargetMachine::EnableStructurizerWorkarounds), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool, true > DisableStructurizer("amdgpu-disable-structurizer", cl::desc("Disable structurizer for experiments; produces unusable code"), cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst", cl::desc("Enable s_singleuse_vdst insertion"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:220
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
ModulePassManager MPM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const char LLVMTargetMachineRef TM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This header defines various interfaces for pass management in LLVM.
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g. stack) the target returns the corresponding address space.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:723
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Tagged union holding either a T or a Error.
Definition: Error.h:481
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:278
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML representation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:106
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:481
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:472
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:499
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:406
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:451
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse target specific filter field if regalloc pass needs it.
Definition: PassBuilder.h:588
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:517
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
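As a hedged sketch of the standard new-pass-manager setup these interfaces support, mirroring the LAM/FAM/CGAM/MAM managers above (the function name is illustrative, and only one extension-point callback is shown):

#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/IPO/GlobalDCE.h"
using namespace llvm;

void buildExamplePipeline(Module &M) { // hypothetical helper
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PassBuilder PB;
  // Register all analyses, then wire the managers together via proxies.
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
  // Extension point: run an extra pass at the start of the default pipeline.
  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &MPM, OptimizationLevel Level) {
        MPM.addPass(GlobalDCEPass());
      });
  ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(OptimizationLevel::O1);
  MPM.run(M, MAM);
}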
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
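A minimal sketch of using the global registry to initialize passes linked into a tool before building a pipeline (the wrapper function name is made up):

#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"

void initExampleCodegenPasses() { // hypothetical helper
  llvm::PassRegistry &PR = *llvm::PassRegistry::getPassRegistry();
  // Initialize the GlobalISel passes linked into this tool.
  llvm::initializeGlobalISel(PR);
}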
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_PS_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
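A minimal sketch of how a target scheduler hook might combine these pieces: create the standard live scheduler and attach a clustering mutation (the hook name is illustrative, not this file's exact code):

#include "llvm/CodeGen/MachineScheduler.h"
using namespace llvm;

static ScheduleDAGInstrs *createExampleScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  // Post-process the built DAG to cluster adjacent loads.
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}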
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:685
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:620
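For illustration, a small loop (with a made-up feature string and helper name) combining split and consume_front to walk a comma-separated list:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <tuple>
using namespace llvm;

void printEnabledFeatures(StringRef Features) { // hypothetical helper
  while (!Features.empty()) {
    StringRef Cur;
    // Split off the text before the first comma; keep the rest for later.
    std::tie(Cur, Features) = Features.split(',');
    // consume_front strips the '+' only when it is present.
    if (Cur.consume_front("+"))
      errs() << Cur << '\n';
  }
}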
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
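A minimal example of the StringSwitch idiom; the mapping below is hypothetical, not AMDGPU's actual one:

#include "llvm/ADT/StringSwitch.h"
using namespace llvm;

unsigned parseSchedPolicy(StringRef Name) { // hypothetical helper
  return StringSwitch<unsigned>(Name)
      .Case("max-occupancy", 0)
      .Cases("max-ilp", "ilp", 1)
      .Default(~0u); // no match
}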
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:458
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
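A hedged sketch of how these helpers combine into a flag declaration, in the same shape as amdgpu-scalarize-global-loads above (this flag name is made up):

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Hypothetical flag: cl::init supplies the default value and cl::Hidden
// keeps the option out of the default -help listing.
static cl::opt<bool> EnableExampleOpt(
    "amdgpu-enable-example-opt", cl::Hidden,
    cl::desc("Enable a hypothetical example optimization"),
    cl::init(false));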
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &)
Pass * createLCSSAPass()
Definition: LCSSA.cpp:541
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:848
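For example, the adaptor lets a module pipeline run a function pass over every function; a sketch using the standard mem2reg pass (the helper name is made up):

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/Mem2Reg.h"
using namespace llvm;

void addFunctionPasses(ModulePassManager &MPM) { // hypothetical helper
  // PromotePass is a function pass; the adaptor runs it once per function.
  MPM.addPass(createModuleToFunctionPassAdaptor(PromotePass()));
}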
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
auto formatv(const char *Fmt, Ts &&...Vals) -> formatv_object< decltype(std::make_tuple(support::detail::build_format_adapter(std::forward< Ts >(Vals))...))>
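A small usage sketch of formatv; the values are placeholders:

#include "llvm/Support/FormatVariadic.h"
// Indexed placeholders are substituted in order; .str() materializes the
// lazily formatted object into a std::string.
std::string Msg = llvm::formatv("occupancy {0} with {1} VGPRs", 8, 64).str();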
char & SILoadStoreOptimizerID
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifies whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
char & AMDGPUInsertSingleUseVDSTID
Pass * createLICMPass()
Definition: LICM.cpp:381
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store clustering.
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inline".
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific DAG, ready for instruction scheduling.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:165
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store clustering.
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets machine operand kill flags.
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3396
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
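A hedged sketch of the call_once idiom used for one-time initialization (the flag, initializer, and entry point below are illustrative):

#include "llvm/Support/Threading.h"

static llvm::once_flag ExampleInitFlag;          // hypothetical flag
static void initExampleOnce() { /* one-time setup goes here */ }

void ensureInitialized() {                       // hypothetical entry point
  // Only the first caller runs initExampleOnce; later callers skip it,
  // and concurrent callers block until it has finished.
  llvm::call_once(ExampleInitFlag, initExampleOnce);
}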
void initializeSILowerSGPRSpillsPass(PassRegistry &)
FunctionPass * createSILowerI1CopiesLegacyPass()
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
char & SIFixSGPRCopiesLegacyID
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3597
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & AMDGPUPerfHintAnalysisLegacyID
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
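For illustration, constructing a mode that flushes results to a sign-preserving zero while keeping IEEE input treatment; a sketch assuming the two-argument DenormalMode constructor from FloatingPointMode.h:

#include "llvm/ADT/FloatingPointMode.h"
using namespace llvm;

// Output denormals flushed to sign-preserving zero; inputs kept IEEE.
DenormalMode Mode(DenormalMode::PreserveSign, DenormalMode::IEEE);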
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.