AMDGPUTargetMachine.cpp
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both the AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine contains all of the hardware specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for the new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
24#include "AMDGPUIGroupLP.h"
25#include "AMDGPUISelDAGToDAG.h"
27#include "AMDGPUMacroFusion.h"
34#include "AMDGPUSplitModule.h"
39#include "GCNDPPCombine.h"
41#include "GCNNSAReassign.h"
45#include "GCNSchedStrategy.h"
46#include "GCNVOPDUtils.h"
47#include "R600.h"
48#include "R600TargetMachine.h"
49#include "SIFixSGPRCopies.h"
50#include "SIFixVGPRCopies.h"
51#include "SIFoldOperands.h"
52#include "SIFormMemoryClauses.h"
54#include "SILowerControlFlow.h"
55#include "SILowerSGPRSpills.h"
56#include "SILowerWWMCopies.h"
58#include "SIMachineScheduler.h"
62#include "SIPeepholeSDWA.h"
63#include "SIPostRABundler.h"
66#include "SIWholeQuadMode.h"
86#include "llvm/CodeGen/Passes.h"
90#include "llvm/IR/IntrinsicsAMDGPU.h"
91#include "llvm/IR/PassManager.h"
100#include "llvm/Transforms/IPO.h"
125#include <optional>
126
127using namespace llvm;
128using namespace llvm::PatternMatch;
129
130namespace {
131//===----------------------------------------------------------------------===//
132// AMDGPU CodeGen Pass Builder interface.
133//===----------------------------------------------------------------------===//
134
135class AMDGPUCodeGenPassBuilder
136 : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
137 using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;
138
139public:
140 AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
141 const CGPassBuilderOption &Opts,
142 PassInstrumentationCallbacks *PIC);
143
144 void addIRPasses(AddIRPass &) const;
145 void addCodeGenPrepare(AddIRPass &) const;
146 void addPreISel(AddIRPass &addPass) const;
147 void addILPOpts(AddMachinePass &) const;
148 void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
149 Error addInstSelector(AddMachinePass &) const;
150 void addPreRewrite(AddMachinePass &) const;
151 void addMachineSSAOptimization(AddMachinePass &) const;
152 void addPostRegAlloc(AddMachinePass &) const;
153 void addPreEmitPass(AddMachinePass &) const;
154 void addPreEmitRegAlloc(AddMachinePass &) const;
155 Error addRegAssignmentOptimized(AddMachinePass &) const;
156 void addPreRegAlloc(AddMachinePass &) const;
157 void addOptimizedRegAlloc(AddMachinePass &) const;
158 void addPreSched2(AddMachinePass &) const;
159
160 /// Check if a pass is enabled given the \p Opt option. If the option is
161 /// explicitly set, it always overrides the default. Otherwise the pass is
162 /// enabled only when the requested optimization level is at least \p Level.
163 bool isPassEnabled(const cl::opt<bool> &Opt,
164 CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
165 void addEarlyCSEOrGVNPass(AddIRPass &) const;
166 void addStraightLineScalarOptimizationPasses(AddIRPass &) const;
167};
168
169class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
170public:
171 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
172 : RegisterRegAllocBase(N, D, C) {}
173};
174
175class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
176public:
177 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
178 : RegisterRegAllocBase(N, D, C) {}
179};
180
181class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
182public:
183 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
184 : RegisterRegAllocBase(N, D, C) {}
185};
186
187static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
188                              const MachineRegisterInfo &MRI,
189                              const Register Reg) {
190 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
191 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
192}
193
194static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
195                              const MachineRegisterInfo &MRI,
196                              const Register Reg) {
197 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
198 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
199}
200
201static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
202                                const MachineRegisterInfo &MRI,
203                                const Register Reg) {
204 const SIMachineFunctionInfo *MFI =
205 MRI.getMF().getInfo<SIMachineFunctionInfo>();
206 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
207 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
208        MFI->checkFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
209}
210
211/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
212static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
213
214/// A dummy default pass factory used to indicate whether the register
215/// allocator has been overridden on the command line.
216static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
217static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
218static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
219
220static SGPRRegisterRegAlloc
221defaultSGPRRegAlloc("default",
222 "pick SGPR register allocator based on -O option",
224
225static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
226               RegisterPassParser<SGPRRegisterRegAlloc>>
227SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
228 cl::desc("Register allocator to use for SGPRs"));
229
230static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
231               RegisterPassParser<VGPRRegisterRegAlloc>>
232VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
233 cl::desc("Register allocator to use for VGPRs"));
234
235static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
236               RegisterPassParser<WWMRegisterRegAlloc>>
237 WWMRegAlloc("wwm-regalloc", cl::Hidden,
238             cl::init(&useDefaultRegisterAllocator),
239 cl::desc("Register allocator to use for WWM registers"));
240
241static void initializeDefaultSGPRRegisterAllocatorOnce() {
242 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
243
244 if (!Ctor) {
245 Ctor = SGPRRegAlloc;
246 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
247 }
248}
249
250static void initializeDefaultVGPRRegisterAllocatorOnce() {
251 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
252
253 if (!Ctor) {
254 Ctor = VGPRRegAlloc;
255 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
256 }
257}
258
259static void initializeDefaultWWMRegisterAllocatorOnce() {
260 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
261
262 if (!Ctor) {
263 Ctor = WWMRegAlloc;
264 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
265 }
266}
267
268static FunctionPass *createBasicSGPRRegisterAllocator() {
269 return createBasicRegisterAllocator(onlyAllocateSGPRs);
270}
271
272static FunctionPass *createGreedySGPRRegisterAllocator() {
273 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
274}
275
276static FunctionPass *createFastSGPRRegisterAllocator() {
277 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
278}
279
280static FunctionPass *createBasicVGPRRegisterAllocator() {
281 return createBasicRegisterAllocator(onlyAllocateVGPRs);
282}
283
284static FunctionPass *createGreedyVGPRRegisterAllocator() {
285 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
286}
287
288static FunctionPass *createFastVGPRRegisterAllocator() {
289 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
290}
291
292static FunctionPass *createBasicWWMRegisterAllocator() {
293 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
294}
295
296static FunctionPass *createGreedyWWMRegisterAllocator() {
297 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
298}
299
300static FunctionPass *createFastWWMRegisterAllocator() {
301 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
302}
303
304static SGPRRegisterRegAlloc basicRegAllocSGPR(
305 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
306static SGPRRegisterRegAlloc greedyRegAllocSGPR(
307 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
308
309static SGPRRegisterRegAlloc fastRegAllocSGPR(
310 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
311
312
313static VGPRRegisterRegAlloc basicRegAllocVGPR(
314 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
315static VGPRRegisterRegAlloc greedyRegAllocVGPR(
316 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
317
318static VGPRRegisterRegAlloc fastRegAllocVGPR(
319 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
320static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
321 "basic register allocator",
322 createBasicWWMRegisterAllocator);
323static WWMRegisterRegAlloc
324 greedyRegAllocWWMReg("greedy", "greedy register allocator",
325 createGreedyWWMRegisterAllocator);
326static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
327 createFastWWMRegisterAllocator);
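// Illustrative usage (hypothetical llc invocation, not taken from this file):
// the allocator kinds registered above can be selected independently, e.g.
//   llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a \
//       -sgpr-regalloc=greedy -wwm-regalloc=basic -vgpr-regalloc=fast in.ll
// Any kind left at "default" is chosen from the -O level by the
// initializeDefault*RegisterAllocatorOnce helpers above.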
328
329static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
330 return Phase == ThinOrFullLTOPhase::ThinLTOPreLink ||
331        Phase == ThinOrFullLTOPhase::FullLTOPreLink;
332}
333} // anonymous namespace
334
335static cl::opt<bool>
336 EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
337 cl::desc("Run early if-conversion"),
338 cl::init(false));
339
340static cl::opt<bool>
341OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
342 cl::desc("Run pre-RA exec mask optimizations"),
343 cl::init(true));
344
345static cl::opt<bool>
346 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
347 cl::desc("Lower GPU ctor / dtors to globals on the device."),
348 cl::init(true), cl::Hidden);
349
350// Option to disable vectorizer for tests.
352 "amdgpu-load-store-vectorizer",
353 cl::desc("Enable load store vectorizer"),
354 cl::init(true),
355 cl::Hidden);
356
357// Option to control global loads scalarization
359 "amdgpu-scalarize-global-loads",
360 cl::desc("Enable global load scalarization"),
361 cl::init(true),
362 cl::Hidden);
363
364// Option to run internalize pass.
366 "amdgpu-internalize-symbols",
367 cl::desc("Enable elimination of non-kernel functions and unused globals"),
368 cl::init(false),
369 cl::Hidden);
370
371// Option to inline all early.
373 "amdgpu-early-inline-all",
374 cl::desc("Inline all functions early"),
375 cl::init(false),
376 cl::Hidden);
377
379 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
380 cl::desc("Enable removal of functions when they"
381 "use features not supported by the target GPU"),
382 cl::init(true));
383
385 "amdgpu-sdwa-peephole",
386 cl::desc("Enable SDWA peepholer"),
387 cl::init(true));
388
390 "amdgpu-dpp-combine",
391 cl::desc("Enable DPP combiner"),
392 cl::init(true));
393
394// Enable address space based alias analysis
396 cl::desc("Enable AMDGPU Alias Analysis"),
397 cl::init(true));
398
399// Enable lib calls simplifications
401 "amdgpu-simplify-libcall",
402 cl::desc("Enable amdgpu library simplifications"),
403 cl::init(true),
404 cl::Hidden);
405
407 "amdgpu-ir-lower-kernel-arguments",
408 cl::desc("Lower kernel argument loads in IR pass"),
409 cl::init(true),
410 cl::Hidden);
411
413 "amdgpu-reassign-regs",
414 cl::desc("Enable register reassign optimizations on gfx10+"),
415 cl::init(true),
416 cl::Hidden);
417
419 "amdgpu-opt-vgpr-liverange",
420 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
421 cl::init(true), cl::Hidden);
422
424 "amdgpu-atomic-optimizer-strategy",
425 cl::desc("Select DPP or Iterative strategy for scan"),
428 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
430 "Use Iterative approach for scan"),
431 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
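// Illustrative usage (assumed command line, not part of this file): the scan
// strategy can be forced with, e.g.
//   llc -mtriple=amdgcn-amd-amdhsa -amdgpu-atomic-optimizer-strategy=Iterative in.ll
// where "DPP" uses cross-lane DPP operations for the scan, "Iterative" uses a
// plain loop, and "None" disables the atomic optimizer entirely.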
432
433// Enable Mode register optimization
435 "amdgpu-mode-register",
436 cl::desc("Enable mode register pass"),
437 cl::init(true),
438 cl::Hidden);
439
440// Enable GFX11+ s_delay_alu insertion
441static cl::opt<bool>
442 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
443 cl::desc("Enable s_delay_alu insertion"),
444 cl::init(true), cl::Hidden);
445
446// Enable GFX11+ VOPD
447static cl::opt<bool>
448 EnableVOPD("amdgpu-enable-vopd",
449 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
450 cl::init(true), cl::Hidden);
451
452// This option is used in lit tests to prevent dead-coding of the patterns being inspected.
453static cl::opt<bool>
454EnableDCEInRA("amdgpu-dce-in-ra",
455 cl::init(true), cl::Hidden,
456 cl::desc("Enable machine DCE inside regalloc"));
457
458static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
459 cl::desc("Adjust wave priority"),
460 cl::init(false), cl::Hidden);
461
463 "amdgpu-scalar-ir-passes",
464 cl::desc("Enable scalar IR passes"),
465 cl::init(true),
466 cl::Hidden);
467
469 "amdgpu-enable-lower-exec-sync",
470 cl::desc("Enable lowering of execution synchronization."), cl::init(true),
471 cl::Hidden);
472
473static cl::opt<bool>
474 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
475 cl::desc("Enable lowering of lds to global memory pass "
476 "and asan instrument resulting IR."),
477 cl::init(true), cl::Hidden);
478
480 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
482 cl::Hidden);
483
485 "amdgpu-enable-pre-ra-optimizations",
486 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
487 cl::Hidden);
488
490 "amdgpu-enable-promote-kernel-arguments",
491 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
492 cl::Hidden, cl::init(true));
493
495 "amdgpu-enable-image-intrinsic-optimizer",
496 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
497 cl::Hidden);
498
499static cl::opt<bool>
500 EnableLoopPrefetch("amdgpu-loop-prefetch",
501 cl::desc("Enable loop data prefetch on AMDGPU"),
502 cl::Hidden, cl::init(false));
503
505 AMDGPUSchedStrategy("amdgpu-sched-strategy",
506 cl::desc("Select custom AMDGPU scheduling strategy."),
507 cl::Hidden, cl::init(""));
508
510 "amdgpu-enable-rewrite-partial-reg-uses",
511 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
512 cl::Hidden);
513
515 "amdgpu-enable-hipstdpar",
516 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
517 cl::Hidden);
518
519static cl::opt<bool>
520 EnableAMDGPUAttributor("amdgpu-attributor-enable",
521 cl::desc("Enable AMDGPUAttributorPass"),
522 cl::init(true), cl::Hidden);
523
525 "new-reg-bank-select",
526 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
527 "regbankselect"),
528 cl::init(false), cl::Hidden);
529
531 "amdgpu-link-time-closed-world",
532 cl::desc("Whether has closed-world assumption at link time"),
533 cl::init(false), cl::Hidden);
534
536 "amdgpu-enable-uniform-intrinsic-combine",
537 cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
538 cl::init(true), cl::Hidden);
539
541 // Register the target
544
629}
630
631static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
632 return std::make_unique<AMDGPUTargetObjectFile>();
633}
634
638
639static ScheduleDAGInstrs *
641 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
642 ScheduleDAGMILive *DAG =
643 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
644 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
645 if (ST.shouldClusterStores())
646 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
648 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
649 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
650 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
651 return DAG;
652}
653
654static ScheduleDAGInstrs *
656 ScheduleDAGMILive *DAG =
657 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
659 return DAG;
660}
661
662static ScheduleDAGInstrs *
664 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
666 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
667 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
668 if (ST.shouldClusterStores())
669 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
670 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
671 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
672 return DAG;
673}
674
675static ScheduleDAGInstrs *
677 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
678 auto *DAG = new GCNIterativeScheduler(
680 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
681 if (ST.shouldClusterStores())
682 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
684 return DAG;
685}
686
693
694static ScheduleDAGInstrs *
696 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
698 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
699 if (ST.shouldClusterStores())
700 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
701 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
703 return DAG;
704}
705
707SISchedRegistry("si", "Run SI's custom scheduler",
709
712 "Run GCN scheduler to maximize occupancy",
714
716 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
718
720 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
722
724 "gcn-iterative-max-occupancy-experimental",
725 "Run GCN scheduler to maximize occupancy (experimental)",
727
729 "gcn-iterative-minreg",
730 "Run GCN iterative scheduler for minimal register usage (experimental)",
732
734 "gcn-iterative-ilp",
735 "Run GCN iterative scheduler for ILP scheduling (experimental)",
737
740 if (!GPU.empty())
741 return GPU;
742
743 // Need to default to a target with flat support for HSA.
744 if (TT.isAMDGCN())
745 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
746
747 return "r600";
748}
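// For example (behaviour implied by the code above): an amdgcn--amdhsa triple
// with no explicit -mcpu defaults to "generic-hsa", any other amdgcn triple
// defaults to "generic", r600 triples default to "r600", and an explicit
// -mcpu/target-cpu always takes precedence.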
749
751 // The AMDGPU toolchain only supports generating shared objects, so we
752 // must always use PIC.
753 return Reloc::PIC_;
754}
755
757 StringRef CPU, StringRef FS,
758 const TargetOptions &Options,
759 std::optional<Reloc::Model> RM,
760 std::optional<CodeModel::Model> CM,
763 T, TT.computeDataLayout(), TT, getGPUOrDefault(TT, CPU), FS, Options,
765 OptLevel),
767 initAsmInfo();
768 if (TT.isAMDGCN()) {
769 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
771 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
773 }
774}
775
778
780
782 Attribute GPUAttr = F.getFnAttribute("target-cpu");
783 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
784}
785
787 Attribute FSAttr = F.getFnAttribute("target-features");
788
789 return FSAttr.isValid() ? FSAttr.getValueAsString()
791}
792
795 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
797 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
798 if (ST.shouldClusterStores())
799 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
800 return DAG;
801}
802
803/// Predicate for Internalize pass.
804static bool mustPreserveGV(const GlobalValue &GV) {
805 if (const Function *F = dyn_cast<Function>(&GV))
806 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
807 F->getName().starts_with("__sanitizer_") ||
808 AMDGPU::isEntryFunctionCC(F->getCallingConv());
809
811 return !GV.use_empty();
812}
813
817
820 if (Params.empty())
822 Params.consume_front("strategy=");
823 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
824 .Case("dpp", ScanOptions::DPP)
825 .Cases({"iterative", ""}, ScanOptions::Iterative)
826 .Case("none", ScanOptions::None)
827 .Default(std::nullopt);
828 if (Result)
829 return *Result;
830 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
831}
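// Sketch of how this parser is reached (assumed new-pass-manager pipeline
// syntax):
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' in.ll
// hands Params == "strategy=dpp" to this function; "strategy=iterative" and
// "strategy=none" select the other ScanOptions values, and anything else is
// rejected as an invalid parameter.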
832
836 while (!Params.empty()) {
837 StringRef ParamName;
838 std::tie(ParamName, Params) = Params.split(';');
839 if (ParamName == "closed-world") {
840 Result.IsClosedWorld = true;
841 } else {
843 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
844 .str(),
846 }
847 }
848 return Result;
849}
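// Sketch of the corresponding pipeline syntax (assumed, mirroring the parser
// above): -passes='amdgpu-attributor<closed-world>' sets IsClosedWorld, and
// any other parameter name produces the "invalid AMDGPUAttributor pass
// parameter" error.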
850
852
853#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
855
856 PB.registerScalarOptimizerLateEPCallback(
857 [](FunctionPassManager &FPM, OptimizationLevel Level) {
858 if (Level == OptimizationLevel::O0)
859 return;
860
862 });
863
864 PB.registerVectorizerEndEPCallback(
865 [](FunctionPassManager &FPM, OptimizationLevel Level) {
866 if (Level == OptimizationLevel::O0)
867 return;
868
870 });
871
872 PB.registerPipelineEarlySimplificationEPCallback(
875 if (!isLTOPreLink(Phase)) {
876 // When we are not using -fgpu-rdc, we can run accelerator code
877 // selection relatively early, but still after linking to prevent
878 // eager removal of potentially reachable symbols.
879 if (EnableHipStdPar) {
882 }
884 }
885
886 if (Level == OptimizationLevel::O0)
887 return;
888
889 // We don't want to run internalization at per-module stage.
893 }
894
897 });
898
899 PB.registerPeepholeEPCallback(
900 [](FunctionPassManager &FPM, OptimizationLevel Level) {
901 if (Level == OptimizationLevel::O0)
902 return;
903
907
910 });
911
912 PB.registerCGSCCOptimizerLateEPCallback(
913 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
914 if (Level == OptimizationLevel::O0)
915 return;
916
918
919 // Add promote kernel arguments pass to the opt pipeline right before
920 // infer address spaces which is needed to do actual address space
921 // rewriting.
922 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
925
926 // Add infer address spaces pass to the opt pipeline after inlining
927 // but before SROA to increase SROA opportunities.
929
930 // This should run after inlining to have any chance of doing
931 // anything, and before other cleanup optimizations.
933
934 if (Level != OptimizationLevel::O0) {
935 // Promote alloca to vector before SROA and loop unroll. If we
936 // manage to eliminate allocas before unroll we may choose to unroll
937 // less.
939 }
940
941 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
942 });
943
944 // FIXME: Why is AMDGPUAttributor not in CGSCC?
945 PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
946 OptimizationLevel Level,
948 if (Level != OptimizationLevel::O0) {
949 if (!isLTOPreLink(Phase)) {
950 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
952 MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
953 }
954 }
955 }
956 });
957
958 PB.registerFullLinkTimeOptimizationLastEPCallback(
959 [this](ModulePassManager &PM, OptimizationLevel Level) {
960 // When we are using -fgpu-rdc, we can only run accelerator code
961 // selection after linking; otherwise we would end up removing
962 // potentially reachable symbols that were exported as external in
963 // other modules.
964 if (EnableHipStdPar) {
967 }
968 // We want to support the -lto-partitions=N option as "best effort".
969 // For that, we need to lower LDS earlier in the pipeline before the
970 // module is partitioned for codegen.
974 PM.addPass(AMDGPUSwLowerLDSPass(*this));
977 if (Level != OptimizationLevel::O0) {
978 // We only want to run this with O2 or higher since inliner and SROA
979 // don't run in O1.
980 if (Level != OptimizationLevel::O1) {
981 PM.addPass(
983 }
984 // Do we really need internalization in LTO?
985 if (InternalizeSymbols) {
988 }
989 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
992 Opt.IsClosedWorld = true;
995 }
996 }
997 if (!NoKernelInfoEndLTO) {
999 FPM.addPass(KernelInfoPrinter(this));
1000 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
1001 }
1002 });
1003
1004 PB.registerRegClassFilterParsingCallback(
1005 [](StringRef FilterName) -> RegAllocFilterFunc {
1006 if (FilterName == "sgpr")
1007 return onlyAllocateSGPRs;
1008 if (FilterName == "vgpr")
1009 return onlyAllocateVGPRs;
1010 if (FilterName == "wwm")
1011 return onlyAllocateWWMRegs;
1012 return nullptr;
1013 });
1014}
1015
1016int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
1017 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1018 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1019 AddrSpace == AMDGPUAS::REGION_ADDRESS)
1020 ? -1
1021 : 0;
1022}
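// In other words (illustrative summary of the function above): the null
// pointer bit pattern is all-ones (-1) in the local, region and private
// address spaces, and zero everywhere else; e.g. a null LDS pointer
// (ptr addrspace(3)) is encoded as 0xFFFFFFFF.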
1023
1025 unsigned DestAS) const {
1026 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
1028}
1029
1031 if (auto *Arg = dyn_cast<Argument>(V);
1032 Arg &&
1033 AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
1034 !Arg->hasByRefAttr())
1036
1037 const auto *LD = dyn_cast<LoadInst>(V);
1038 if (!LD) // TODO: Handle invariant load like constant.
1040
1041 // It must be a loaded generic pointer.
1042 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
1043
1044 const auto *Ptr = LD->getPointerOperand();
1045 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
1047 // A generic pointer loaded from constant memory can be assumed to be a
1048 // global pointer, since constant memory is only populated on the host
1049 // side. As implied by the offload programming model, only global pointers
1050 // can be referenced on the host side.
1052}
1053
1054std::pair<const Value *, unsigned>
1056 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1057 switch (II->getIntrinsicID()) {
1058 case Intrinsic::amdgcn_is_shared:
1059 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
1060 case Intrinsic::amdgcn_is_private:
1061 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
1062 default:
1063 break;
1064 }
1065 return std::pair(nullptr, -1);
1066 }
1067 // Check the global pointer predication based on
1068 // (!is_shared(p) && !is_private(p)). Note that logical 'and' is commutative
1069 // and the order of 'is_shared' and 'is_private' is not significant.
1070 Value *Ptr;
1071 if (match(
1072 const_cast<Value *>(V),
1075 m_Deferred(Ptr))))))
1076 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
1077
1078 return std::pair(nullptr, -1);
1079}
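// Rough IR shape recognized by the matcher above (hand-written illustration,
// not taken from a test):
//   %s  = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %t  = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %ns = xor i1 %s, true
//   %nt = xor i1 %t, true
//   %g  = and i1 %ns, %nt   ; %p may be treated as a global pointer
// Either operand order of the 'and' is accepted, as noted above.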
1080
1081unsigned
1096
1098 Module &M, unsigned NumParts,
1099 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
1100 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
1101 // but all current users of this API don't have one ready and would need to
1102 // create one anyway. Let's hide the boilerplate for now to keep it simple.
1103
1108
1109 PassBuilder PB(this);
1110 PB.registerModuleAnalyses(MAM);
1111 PB.registerFunctionAnalyses(FAM);
1112 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
1113
1115 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
1116 MPM.run(M, MAM);
1117 return true;
1118}
1119
1120//===----------------------------------------------------------------------===//
1121// GCN Target Machine (SI+)
1122//===----------------------------------------------------------------------===//
1123
1125 StringRef CPU, StringRef FS,
1126 const TargetOptions &Options,
1127 std::optional<Reloc::Model> RM,
1128 std::optional<CodeModel::Model> CM,
1129 CodeGenOptLevel OL, bool JIT)
1130 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
1131
1132const TargetSubtargetInfo *
1134 StringRef GPU = getGPUName(F);
1136
1137 SmallString<128> SubtargetKey(GPU);
1138 SubtargetKey.append(FS);
1139
1140 auto &I = SubtargetMap[SubtargetKey];
1141 if (!I) {
1142 // This needs to be done before we create a new subtarget since any
1143 // creation will depend on the TM and the code generation flags on the
1144 // function that reside in TargetOptions.
1146 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1147 }
1148
1149 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1150
1151 return I.get();
1152}
1153
1156 return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
1157}
1158
1161 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1163 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1164 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1165}
1166
1169 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1170 if (ST.enableSIScheduler())
1172
1173 Attribute SchedStrategyAttr =
1174 C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1175 StringRef SchedStrategy = SchedStrategyAttr.isValid()
1176 ? SchedStrategyAttr.getValueAsString()
1178
1179 if (SchedStrategy == "max-ilp")
1181
1182 if (SchedStrategy == "max-memory-clause")
1184
1185 if (SchedStrategy == "iterative-ilp")
1187
1188 if (SchedStrategy == "iterative-minreg")
1189 return createMinRegScheduler(C);
1190
1191 if (SchedStrategy == "iterative-maxocc")
1193
1195}
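// The strategy is typically selected per function (illustrative, based on the
// lookup above), either via the function attribute
//   attributes #0 = { "amdgpu-sched-strategy"="max-ilp" }
// or globally via -amdgpu-sched-strategy=max-ilp; when present, the attribute
// takes precedence over the command-line default.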
1196
1199 ScheduleDAGMI *DAG =
1200 new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
1201 /*RemoveKillFlags=*/true);
1202 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1204 if (ST.shouldClusterStores())
1207 if ((EnableVOPD.getNumOccurrences() ||
1209 EnableVOPD)
1213 return DAG;
1214}
1215//===----------------------------------------------------------------------===//
1216// AMDGPU Legacy Pass Setup
1217//===----------------------------------------------------------------------===//
1218
1219std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1220 return getStandardCSEConfigForOpt(TM->getOptLevel());
1221}
1222
1223namespace {
1224
1225class GCNPassConfig final : public AMDGPUPassConfig {
1226public:
1227 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1228 : AMDGPUPassConfig(TM, PM) {
1229 // It is necessary to know the register usage of the entire call graph. We
1230 // allow calls without EnableAMDGPUFunctionCalls if they are marked
1231 // noinline, so this is always required.
1232 setRequiresCodeGenSCCOrder(true);
1233 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1234 }
1235
1236 GCNTargetMachine &getGCNTargetMachine() const {
1237 return getTM<GCNTargetMachine>();
1238 }
1239
1240 bool addPreISel() override;
1241 void addMachineSSAOptimization() override;
1242 bool addILPOpts() override;
1243 bool addInstSelector() override;
1244 bool addIRTranslator() override;
1245 void addPreLegalizeMachineIR() override;
1246 bool addLegalizeMachineIR() override;
1247 void addPreRegBankSelect() override;
1248 bool addRegBankSelect() override;
1249 void addPreGlobalInstructionSelect() override;
1250 bool addGlobalInstructionSelect() override;
1251 void addPreRegAlloc() override;
1252 void addFastRegAlloc() override;
1253 void addOptimizedRegAlloc() override;
1254
1255 FunctionPass *createSGPRAllocPass(bool Optimized);
1256 FunctionPass *createVGPRAllocPass(bool Optimized);
1257 FunctionPass *createWWMRegAllocPass(bool Optimized);
1258 FunctionPass *createRegAllocPass(bool Optimized) override;
1259
1260 bool addRegAssignAndRewriteFast() override;
1261 bool addRegAssignAndRewriteOptimized() override;
1262
1263 bool addPreRewrite() override;
1264 void addPostRegAlloc() override;
1265 void addPreSched2() override;
1266 void addPreEmitPass() override;
1267 void addPostBBSections() override;
1268};
1269
1270} // end anonymous namespace
1271
1273 : TargetPassConfig(TM, PM) {
1274 // Exceptions and StackMaps are not supported, so these passes will never do
1275 // anything.
1278 // Garbage collection is not supported.
1281}
1282
1289
1294 // ReassociateGEPs exposes more opportunities for SLSR. See
1295 // the example in reassociate-geps-and-slsr.ll.
1297 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
1298 // or EarlyCSE can reuse.
1300 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1302 // NaryReassociate on GEPs creates redundant common expressions, so run
1303 // EarlyCSE after it.
1305}
1306
1309
1310 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
1312
1313 // There is no reason to run these.
1317
1319 if (LowerCtorDtor)
1321
1322 if (TM.getTargetTriple().isAMDGCN() &&
1325
1328
1329 // This can be disabled by passing ::Disable here or on the command line
1330 // with --expand-variadics-override=disable.
1332
1333 // Function calls are not supported, so make sure we inline everything.
1336
1337 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1338 if (TM.getTargetTriple().getArch() == Triple::r600)
1340
1341 // Make enqueued block runtime handles externally visible.
1343
1344 // Lower special LDS accesses.
1347
1348 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1349 if (EnableSwLowerLDS)
1351
1352 // Runs before PromoteAlloca so the latter can account for function uses
1355 }
1356
1357 // Run atomic optimizer before Atomic Expand
1358 if ((TM.getTargetTriple().isAMDGCN()) &&
1359 (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
1362 }
1363
1365
1366 if (TM.getOptLevel() > CodeGenOptLevel::None) {
1368
1371
1375 AAResults &AAR) {
1376 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1377 AAR.addAAResult(WrapperPass->getResult());
1378 }));
1379 }
1380
1381 if (TM.getTargetTriple().isAMDGCN()) {
1382 // TODO: May want to move later or split into an early and late one.
1384 }
1385
1386 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1387 // have expanded.
1388 if (TM.getOptLevel() > CodeGenOptLevel::Less)
1390 }
1391
1393
1394 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1395 // example, GVN can combine
1396 //
1397 // %0 = add %a, %b
1398 // %1 = add %b, %a
1399 //
1400 // and
1401 //
1402 // %0 = shl nsw %a, 2
1403 // %1 = shl %a, 2
1404 //
1405 // but EarlyCSE can do neither of them.
1408}
1409
1411 if (TM->getTargetTriple().isAMDGCN() &&
1412 TM->getOptLevel() > CodeGenOptLevel::None)
1414
1415 if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
1417
1419
1422
1423 if (TM->getTargetTriple().isAMDGCN()) {
1424 // This lowering has been placed after codegenprepare to take advantage of
1425 // address mode matching (which is why it isn't put with the LDS lowerings).
1426 // It could be placed anywhere before uniformity annotations (an analysis
1427 // that it changes by splitting up fat pointers into their components)
1428 // but has been put before switch lowering and CFG flattening so that those
1429 // passes can run on the more optimized control flow this pass creates in
1430 // many cases.
1433 // In accordance with the above FIXME, manually force all the
1434 // function-level passes into a CGSCCPassManager.
1435 addPass(new DummyCGSCCPass());
1436 }
1437
1438 // The LowerSwitch pass may introduce unreachable blocks that can
1439 // cause unexpected behavior for subsequent passes. Placing it
1440 // here seems better, as these blocks will get cleaned up by the
1441 // UnreachableBlockElim pass inserted next in the pass flow.
1443}
1444
1446 if (TM->getOptLevel() > CodeGenOptLevel::None)
1448 return false;
1449}
1450
1455
1457 // Do nothing. GC is not supported.
1458 return false;
1459}
1460
1461//===----------------------------------------------------------------------===//
1462// GCN Legacy Pass Setup
1463//===----------------------------------------------------------------------===//
1464
1465bool GCNPassConfig::addPreISel() {
1467
1468 if (TM->getOptLevel() > CodeGenOptLevel::None)
1469 addPass(createSinkingPass());
1470
1471 if (TM->getOptLevel() > CodeGenOptLevel::None)
1473
1474 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1475 // regions formed by them.
1477 addPass(createFixIrreduciblePass());
1478 addPass(createUnifyLoopExitsPass());
1479 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1480
1483 // TODO: Move this right after structurizeCFG to avoid extra divergence
1484 // analysis. This depends on stopping SIAnnotateControlFlow from making
1485 // control flow modifications.
1487
1488 // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
1489 // with -new-reg-bank-select and without any of the fallback options.
1491 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
1492 addPass(createLCSSAPass());
1493
1494 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1496
1497 return false;
1498}
1499
1500void GCNPassConfig::addMachineSSAOptimization() {
1502
1503 // We want to fold operands after PeepholeOptimizer has run (or as part of
1504 // it), because it will eliminate extra copies making it easier to fold the
1505 // real source operand. We want to eliminate dead instructions after, so that
1506 // we see fewer uses of the copies. We then need to clean up the dead
1507 // instructions leftover after the operands are folded as well.
1508 //
1509 // XXX - Can we get away without running DeadMachineInstructionElim again?
1510 addPass(&SIFoldOperandsLegacyID);
1511 if (EnableDPPCombine)
1512 addPass(&GCNDPPCombineLegacyID);
1514 if (isPassEnabled(EnableSDWAPeephole)) {
1515 addPass(&SIPeepholeSDWALegacyID);
1516 addPass(&EarlyMachineLICMID);
1517 addPass(&MachineCSELegacyID);
1518 addPass(&SIFoldOperandsLegacyID);
1519 }
1522}
1523
1524bool GCNPassConfig::addILPOpts() {
1525 if (EnableEarlyIfConversion)
1526 addPass(&EarlyIfConverterLegacyID);
1527
1529 return false;
1530}
1531
1532bool GCNPassConfig::addInstSelector() {
1534 addPass(&SIFixSGPRCopiesLegacyID);
1536 return false;
1537}
1538
1539bool GCNPassConfig::addIRTranslator() {
1540 addPass(new IRTranslator(getOptLevel()));
1541 return false;
1542}
1543
1544void GCNPassConfig::addPreLegalizeMachineIR() {
1545 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1546 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1547 addPass(new Localizer());
1548}
1549
1550bool GCNPassConfig::addLegalizeMachineIR() {
1551 addPass(new Legalizer());
1552 return false;
1553}
1554
1555void GCNPassConfig::addPreRegBankSelect() {
1556 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1557 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1559}
1560
1561bool GCNPassConfig::addRegBankSelect() {
1562 if (NewRegBankSelect) {
1565 } else {
1566 addPass(new RegBankSelect());
1567 }
1568 return false;
1569}
1570
1571void GCNPassConfig::addPreGlobalInstructionSelect() {
1572 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1573 addPass(createAMDGPURegBankCombiner(IsOptNone));
1574}
1575
1576bool GCNPassConfig::addGlobalInstructionSelect() {
1577 addPass(new InstructionSelect(getOptLevel()));
1578 return false;
1579}
1580
1581void GCNPassConfig::addFastRegAlloc() {
1582 // FIXME: We have to disable the verifier here because of PHIElimination +
1583 // TwoAddressInstructions disabling it.
1584
1585 // This must be run immediately after phi elimination and before
1586 // TwoAddressInstructions, otherwise the processing of the tied operand of
1587 // SI_ELSE will introduce a copy of the tied operand source after the else.
1589
1591
1593}
1594
1595void GCNPassConfig::addPreRegAlloc() {
1596 if (getOptLevel() != CodeGenOptLevel::None)
1598}
1599
1600void GCNPassConfig::addOptimizedRegAlloc() {
1601 if (EnableDCEInRA)
1603
1604 // FIXME: When an instruction has a killed operand and the instruction is
1605 // inside a bundle, it seems that only the BUNDLE instruction appears as the
1606 // kill of the register in LiveVariables. This triggers a verifier failure,
1607 // so we should fix it and re-enable the verifier.
1608 if (OptVGPRLiveRange)
1610
1611 // This must be run immediately after phi elimination and before
1612 // TwoAddressInstructions, otherwise the processing of the tied operand of
1613 // SI_ELSE will introduce a copy of the tied operand source after the else.
1615
1618
1619 if (isPassEnabled(EnablePreRAOptimizations))
1621
1622 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1623 // instructions that cause scheduling barriers.
1625
1626 if (OptExecMaskPreRA)
1628
1629 // This is not an essential optimization and it has a noticeable impact on
1630 // compilation time, so we only enable it from O2.
1631 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1633
1635}
1636
1637bool GCNPassConfig::addPreRewrite() {
1639 addPass(&GCNNSAReassignID);
1640
1642 return true;
1643}
1644
1645FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1646 // Initialize the global default.
1647 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1648 initializeDefaultSGPRRegisterAllocatorOnce);
1649
1650 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1651 if (Ctor != useDefaultRegisterAllocator)
1652 return Ctor();
1653
1654 if (Optimized)
1655 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1656
1657 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1658}
1659
1660FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1661 // Initialize the global default.
1662 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1663 initializeDefaultVGPRRegisterAllocatorOnce);
1664
1665 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1666 if (Ctor != useDefaultRegisterAllocator)
1667 return Ctor();
1668
1669 if (Optimized)
1670 return createGreedyVGPRRegisterAllocator();
1671
1672 return createFastVGPRRegisterAllocator();
1673}
1674
1675FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1676 // Initialize the global default.
1677 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1678 initializeDefaultWWMRegisterAllocatorOnce);
1679
1680 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1681 if (Ctor != useDefaultRegisterAllocator)
1682 return Ctor();
1683
1684 if (Optimized)
1685 return createGreedyWWMRegisterAllocator();
1686
1687 return createFastWWMRegisterAllocator();
1688}
1689
1690FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1691 llvm_unreachable("should not be used");
1692}
1693
1695 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1696 "and -vgpr-regalloc";
1697
1698bool GCNPassConfig::addRegAssignAndRewriteFast() {
1699 if (!usingDefaultRegAlloc())
1701
1702 addPass(&GCNPreRALongBranchRegID);
1703
1704 addPass(createSGPRAllocPass(false));
1705
1706 // Equivalent of PEI for SGPRs.
1707 addPass(&SILowerSGPRSpillsLegacyID);
1708
1709 // Allocate WWM registers used in whole quad mode operations (for shaders).
1711
1712 // For allocating other wwm register operands.
1713 addPass(createWWMRegAllocPass(false));
1714
1715 addPass(&SILowerWWMCopiesLegacyID);
1717
1718 // For allocating per-thread VGPRs.
1719 addPass(createVGPRAllocPass(false));
1720
1721 return true;
1722}
1723
1724bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1725 if (!usingDefaultRegAlloc())
1727
1728 addPass(&GCNPreRALongBranchRegID);
1729
1730 addPass(createSGPRAllocPass(true));
1731
1732 // Commit allocated register changes. This is mostly necessary because too
1733 // many things rely on the use lists of the physical registers, such as the
1734 // verifier. This is only necessary with allocators which use LiveIntervals,
1735 // since FastRegAlloc does the replacements itself.
1736 addPass(createVirtRegRewriter(false));
1737
1738 // At this point, the sgpr-regalloc has been done and it is good to have the
1739 // stack slot coloring to try to optimize the SGPR spill stack indices before
1740 // attempting the custom SGPR spill lowering.
1741 addPass(&StackSlotColoringID);
1742
1743 // Equivalent of PEI for SGPRs.
1744 addPass(&SILowerSGPRSpillsLegacyID);
1745
1746 // Allocate WWM registers used in whole quad mode operations (for shaders).
1748
1749 // For allocating other whole wave mode registers.
1750 addPass(createWWMRegAllocPass(true));
1751 addPass(&SILowerWWMCopiesLegacyID);
1752 addPass(createVirtRegRewriter(false));
1754
1755 // For allocating per-thread VGPRs.
1756 addPass(createVGPRAllocPass(true));
1757
1758 addPreRewrite();
1759 addPass(&VirtRegRewriterID);
1760
1762
1763 return true;
1764}
1765
1766void GCNPassConfig::addPostRegAlloc() {
1767 addPass(&SIFixVGPRCopiesID);
1768 if (getOptLevel() > CodeGenOptLevel::None)
1771}
1772
1773void GCNPassConfig::addPreSched2() {
1774 if (TM->getOptLevel() > CodeGenOptLevel::None)
1776 addPass(&SIPostRABundlerLegacyID);
1777}
1778
1779void GCNPassConfig::addPreEmitPass() {
1780 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1781 addPass(&GCNCreateVOPDID);
1782 addPass(createSIMemoryLegalizerPass());
1783 addPass(createSIInsertWaitcntsPass());
1784
1785 addPass(createSIModeRegisterPass());
1786
1787 if (getOptLevel() > CodeGenOptLevel::None)
1788 addPass(&SIInsertHardClausesID);
1789
1791 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1793 if (getOptLevel() > CodeGenOptLevel::None)
1794 addPass(&SIPreEmitPeepholeID);
1795 // The hazard recognizer that runs as part of the post-ra scheduler does not
1796 // guarantee to be able to handle all hazards correctly. This is because if there
1797 // are multiple scheduling regions in a basic block, the regions are scheduled
1798 // bottom up, so when we begin to schedule a region we don't know what
1799 // instructions were emitted directly before it.
1800 //
1801 // Here we add a stand-alone hazard recognizer pass which can handle all
1802 // cases.
1803 addPass(&PostRAHazardRecognizerID);
1804
1806
1808
1809 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1810 addPass(&AMDGPUInsertDelayAluID);
1811
1812 addPass(&BranchRelaxationPassID);
1813}
1814
1815void GCNPassConfig::addPostBBSections() {
1816 // We run this later to avoid passes like livedebugvalues and BBSections
1817 // having to deal with the apparent multi-entry functions we may generate.
1819}
1820
1822 return new GCNPassConfig(*this, PM);
1823}
1824
1830
1837
1841
1848
1851 SMDiagnostic &Error, SMRange &SourceRange) const {
1852 const yaml::SIMachineFunctionInfo &YamlMFI =
1853 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1854 MachineFunction &MF = PFS.MF;
1856 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1857
1858 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1859 return true;
1860
1861 if (MFI->Occupancy == 0) {
1862 // Fixup the subtarget dependent default value.
1863 MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
1864 }
1865
1866 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1867 Register TempReg;
1868 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1869 SourceRange = RegName.SourceRange;
1870 return true;
1871 }
1872 RegVal = TempReg;
1873
1874 return false;
1875 };
1876
1877 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1878 Register &RegVal) {
1879 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1880 };
1881
1882 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1883 return true;
1884
1885 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1886 return true;
1887
1888 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1889 MFI->LongBranchReservedReg))
1890 return true;
1891
1892 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1893 // Create a diagnostic for the register string literal.
1894 const MemoryBuffer &Buffer =
1895 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1896 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1897 RegName.Value.size(), SourceMgr::DK_Error,
1898 "incorrect register class for field", RegName.Value,
1899 {}, {});
1900 SourceRange = RegName.SourceRange;
1901 return true;
1902 };
1903
1904 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1905 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1906 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1907 return true;
1908
1909 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1910 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1911 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1912 }
1913
1914 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1915 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1916 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1917 }
1918
1919 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1920 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1921 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1922 }
1923
1924 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1925 Register ParsedReg;
1926 if (parseRegister(YamlReg, ParsedReg))
1927 return true;
1928
1929 MFI->reserveWWMRegister(ParsedReg);
1930 }
1931
1932 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1933 MFI->setFlag(Info->VReg, Info->Flags);
1934 }
1935 for (const auto &[_, Info] : PFS.VRegInfos) {
1936 MFI->setFlag(Info->VReg, Info->Flags);
1937 }
1938
1939 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1940 Register ParsedReg;
1941 if (parseRegister(YamlRegStr, ParsedReg))
1942 return true;
1943 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1944 }
1945
1946 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1947 const TargetRegisterClass &RC,
1948 ArgDescriptor &Arg, unsigned UserSGPRs,
1949 unsigned SystemSGPRs) {
1950 // Skip parsing if it's not present.
1951 if (!A)
1952 return false;
1953
1954 if (A->IsRegister) {
1955 Register Reg;
1956 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1957 SourceRange = A->RegisterName.SourceRange;
1958 return true;
1959 }
1960 if (!RC.contains(Reg))
1961 return diagnoseRegisterClass(A->RegisterName);
1962 Arg = ArgDescriptor::createRegister(Reg);
1963 } else
1964 Arg = ArgDescriptor::createStack(A->StackOffset);
1965 // Check and apply the optional mask.
1966 if (A->Mask)
1967 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1968
1969 MFI->NumUserSGPRs += UserSGPRs;
1970 MFI->NumSystemSGPRs += SystemSGPRs;
1971 return false;
1972 };
1973
1974 if (YamlMFI.ArgInfo &&
1975 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1976 AMDGPU::SGPR_128RegClass,
1977 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1978 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1979 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1980 2, 0) ||
1981 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1982 MFI->ArgInfo.QueuePtr, 2, 0) ||
1983 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1984 AMDGPU::SReg_64RegClass,
1985 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1986 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1987 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1988 2, 0) ||
1989 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1990 AMDGPU::SReg_64RegClass,
1991 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1992 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1993 AMDGPU::SGPR_32RegClass,
1994 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1995 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1996 AMDGPU::SGPR_32RegClass,
1997 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1998 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1999 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
2000 0, 1) ||
2001 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
2002 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
2003 0, 1) ||
2004 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
2005 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
2006 0, 1) ||
2007 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
2008 AMDGPU::SGPR_32RegClass,
2009 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
2010 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
2011 AMDGPU::SGPR_32RegClass,
2012 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
2013 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
2014 AMDGPU::SReg_64RegClass,
2015 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
2016 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
2017 AMDGPU::SReg_64RegClass,
2018 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
2019 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
2020 AMDGPU::VGPR_32RegClass,
2021 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
2022 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
2023 AMDGPU::VGPR_32RegClass,
2024 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
2025 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
2026 AMDGPU::VGPR_32RegClass,
2027 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
2028 return true;
2029
2030 // Parse FirstKernArgPreloadReg separately, since it's a Register,
2031 // not ArgDescriptor.
2032 if (YamlMFI.ArgInfo && YamlMFI.ArgInfo->FirstKernArgPreloadReg) {
2033 const yaml::SIArgument &A = *YamlMFI.ArgInfo->FirstKernArgPreloadReg;
2034
2035 if (!A.IsRegister) {
2036 // For stack arguments, we don't have RegisterName.SourceRange,
2037 // but we should have some location info from the YAML parser.
2038 const MemoryBuffer &Buffer =
2039 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
2040 // Create a minimal valid source range
2042 SMRange Range(Loc, Loc);
2043
2045 *PFS.SM, Loc, Buffer.getBufferIdentifier(), 1, 0, SourceMgr::DK_Error,
2046 "firstKernArgPreloadReg must be a register, not a stack location", "",
2047 {}, {});
2048
2049 SourceRange = Range;
2050 return true;
2051 }
2052
2053 Register Reg;
2054 if (parseNamedRegisterReference(PFS, Reg, A.RegisterName.Value, Error)) {
2055 SourceRange = A.RegisterName.SourceRange;
2056 return true;
2057 }
2058
2059 if (!AMDGPU::SGPR_32RegClass.contains(Reg))
2060 return diagnoseRegisterClass(A.RegisterName);
2061
2062 MFI->ArgInfo.FirstKernArgPreloadReg = Reg;
2063 MFI->NumUserSGPRs += YamlMFI.NumKernargPreloadSGPRs;
2064 }
2065
2066 if (ST.hasIEEEMode())
2067 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
2068 if (ST.hasDX10ClampMode())
2069 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
2070
2071 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
2072 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
2075 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
2078
2085
2086 if (YamlMFI.HasInitWholeWave)
2087 MFI->setInitWholeWave();
2088
2089 return false;
2090}
2091
2092//===----------------------------------------------------------------------===//
2093// AMDGPU CodeGen Pass Builder interface.
2094//===----------------------------------------------------------------------===//
2095
2096AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
2097 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
2099 : CodeGenPassBuilder(TM, Opts, PIC) {
2100 Opt.MISchedPostRA = true;
2101 Opt.RequiresCodeGenSCCOrder = true;
2102 // Exceptions and StackMaps are not supported, so these passes will never do
2103 // anything.
2104 // Garbage collection is not supported.
2105 disablePass<StackMapLivenessPass, FuncletLayoutPass,
2107}
2108
2109void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
2110 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
2112
2114 if (LowerCtorDtor)
2115 addPass(AMDGPUCtorDtorLoweringPass());
2116
2117 if (isPassEnabled(EnableImageIntrinsicOptimizer))
2119
2122 // This can be disabled by passing ::Disable here or on the command line
2123 // with --expand-variadics-override=disable.
2125
2126 addPass(AMDGPUAlwaysInlinePass());
2127 addPass(AlwaysInlinerPass());
2128
2130
2132 addPass(AMDGPULowerExecSyncPass());
2133
2134 if (EnableSwLowerLDS)
2135 addPass(AMDGPUSwLowerLDSPass(TM));
2136
2137 // Runs before PromoteAlloca so the latter can account for function uses
2139 addPass(AMDGPULowerModuleLDSPass(TM));
2140
2141 // Run atomic optimizer before Atomic Expand
2142 if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
2145
2146 addPass(AtomicExpandPass(TM));
2147
2148 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2149 addPass(AMDGPUPromoteAllocaPass(TM));
2150 if (isPassEnabled(EnableScalarIRPasses))
2151 addStraightLineScalarOptimizationPasses(addPass);
2152
2153 // TODO: Handle EnableAMDGPUAliasAnalysis
2154
2155 // TODO: May want to move later or split into an early and late one.
2156 addPass(AMDGPUCodeGenPreparePass(TM));
2157
2158 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
2159 // have expanded.
2160 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2162 /*UseMemorySSA=*/true));
2163 }
2164 }
2165
2166 Base::addIRPasses(addPass);
2167
2168 // EarlyCSE is not always strong enough to clean up what LSR produces. For
2169 // example, GVN can combine
2170 //
2171 // %0 = add %a, %b
2172 // %1 = add %b, %a
2173 //
2174 // and
2175 //
2176 // %0 = shl nsw %a, 2
2177 // %1 = shl %a, 2
2178 //
2179 // but EarlyCSE can do neither of them.
2180 if (isPassEnabled(EnableScalarIRPasses))
2181 addEarlyCSEOrGVNPass(addPass);
2182}
2183
2184void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const {
2185 if (TM.getOptLevel() > CodeGenOptLevel::None)
2187
2189 addPass(AMDGPULowerKernelArgumentsPass(TM));
2190
2191 Base::addCodeGenPrepare(addPass);
2192
2193 if (isPassEnabled(EnableLoadStoreVectorizer))
2194 addPass(LoadStoreVectorizerPass());
2195
2196 // This lowering has been placed after codegenprepare to take advantage of
2197 // address mode matching (which is why it isn't put with the LDS lowerings).
2198 // It could be placed anywhere before uniformity annotations (an analysis
2199 // that it changes by splitting up fat pointers into their components)
2200 // but has been put before switch lowering and CFG flattening so that those
2201 // passes can run on the more optimized control flow this pass creates in
2202 // many cases.
2203 addPass(AMDGPULowerBufferFatPointersPass(TM));
2204 addPass.requireCGSCCOrder();
2205
2206 addPass(AMDGPULowerIntrinsicsPass(TM));
2207
2208 // The LowerSwitch pass may introduce unreachable blocks that can cause
2209 // unexpected behavior for subsequent passes. Placing it here seems better,
2210 // as these blocks will get cleaned up by UnreachableBlockElim, which is
2211 // inserted next in the pass flow.
2212 addPass(LowerSwitchPass());
2213}
2214
2215void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
2216
2217 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2218 addPass(FlattenCFGPass());
2219 addPass(SinkingPass());
2220 addPass(AMDGPULateCodeGenPreparePass(TM));
2221 }
2222
2223 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2224 // regions formed by them.
2225
2226 addPass(AMDGPUUnifyDivergentExitNodesPass());
2227 addPass(FixIrreduciblePass());
2228 addPass(UnifyLoopExitsPass());
2229 addPass(StructurizeCFGPass(/*SkipUniformRegions=*/false));
2230
2231 addPass(AMDGPUAnnotateUniformValuesPass());
2232
2233 addPass(SIAnnotateControlFlowPass(TM));
2234
2235 // TODO: Move this right after structurizeCFG to avoid extra divergence
2236 // analysis. This depends on stopping SIAnnotateControlFlow from making
2237 // control flow modifications.
2238 addPass(AMDGPURewriteUndefForPHIPass());
2239
2241 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2242 addPass(LCSSAPass());
2243
2244 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2245 addPass(AMDGPUPerfHintAnalysisPass(TM));
2246
2247 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2248 // isn't this in addInstSelector?
2249 addPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>(),
2250 /*Force=*/true);
2251}
2252
2253 void AMDGPUCodeGenPassBuilder::addILPOpts(AddMachinePass &addPass) const {
2254 if (EnableEarlyIfConversion)
2255 addPass(EarlyIfConverterPass());
2256
2257 Base::addILPOpts(addPass);
2258}
2259
2260void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
2261 CreateMCStreamer) const {
2262 // TODO: Add AsmPrinter.
2263}
2264
2265Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &addPass) const {
2266 addPass(AMDGPUISelDAGToDAGPass(TM));
2267 addPass(SIFixSGPRCopiesPass());
2268 addPass(SILowerI1CopiesPass());
2269 return Error::success();
2270}
2271
2272void AMDGPUCodeGenPassBuilder::addPreRewrite(AddMachinePass &addPass) const {
2273 if (EnableRegReassign) {
2274 addPass(GCNNSAReassignPass());
2275 }
2276}
2277
2278void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2279 AddMachinePass &addPass) const {
2280 Base::addMachineSSAOptimization(addPass);
2281
2282 addPass(SIFoldOperandsPass());
2283 if (EnableDPPCombine) {
2284 addPass(GCNDPPCombinePass());
2285 }
2286 addPass(SILoadStoreOptimizerPass());
2287 if (isPassEnabled(EnableSDWAPeephole)) {
2288 addPass(SIPeepholeSDWAPass());
2289 addPass(EarlyMachineLICMPass());
2290 addPass(MachineCSEPass());
2291 addPass(SIFoldOperandsPass());
2292 }
2293 addPass(DeadMachineInstructionElimPass());
2294 addPass(SIShrinkInstructionsPass());
2295}
2296
2297void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2298 AddMachinePass &addPass) const {
2299 if (EnableDCEInRA)
2300 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2301
2302 // FIXME: When an instruction has a killed operand and the instruction is
2303 // inside a bundle, it seems only the BUNDLE instruction appears as the kill
2304 // of the register in LiveVariables; this triggers a failure in the verifier.
2305 // We should fix it and enable the verifier.
2306 if (OptVGPRLiveRange)
2307 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2308 SIOptimizeVGPRLiveRangePass());
2309
2310 // This must be run immediately after phi elimination and before
2311 // TwoAddressInstructions; otherwise the processing of the tied operand of
2312 // SI_ELSE will introduce a copy of the tied operand source after the else.
2313 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2314
2315 if (EnableRewritePartialRegUses)
2316 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2317
2318 if (isPassEnabled(EnablePreRAOptimizations))
2319 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2320
2321 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2322 // instructions that cause scheduling barriers.
2323 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2324
2325 if (OptExecMaskPreRA)
2326 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2327
2328 // This is not an essential optimization and it has a noticeable impact on
2329 // compilation time, so we only enable it from O2.
2330 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2331 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2332
2333 Base::addOptimizedRegAlloc(addPass);
2334}
2335
2336void AMDGPUCodeGenPassBuilder::addPreRegAlloc(AddMachinePass &addPass) const {
2337 if (getOptLevel() != CodeGenOptLevel::None)
2338 addPass(AMDGPUPrepareAGPRAllocPass());
2339}
2340
2341Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2342 AddMachinePass &addPass) const {
2343 // TODO: Check --regalloc-npm option
2344
2345 addPass(GCNPreRALongBranchRegPass());
2346
2347 addPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}));
2348
2349 // Commit allocated register changes. This is mostly necessary because too
2350 // many things rely on the use lists of the physical registers, such as the
2351 // verifier. This is only necessary with allocators which use LiveIntervals,
2352 // since FastRegAlloc does the replacements itself.
2353 addPass(VirtRegRewriterPass(false));
2354
2355 // At this point, the sgpr-regalloc has been done and it is good to have the
2356 // stack slot coloring to try to optimize the SGPR spill stack indices before
2357 // attempting the custom SGPR spill lowering.
2358 addPass(StackSlotColoringPass());
2359
2360 // Equivalent of PEI for SGPRs.
2361 addPass(SILowerSGPRSpillsPass());
2362
2363 // Allocate wwm registers used in whole quad mode operations (for shaders).
2364 addPass(SIPreAllocateWWMRegsPass());
2365
2366 // For allocating other wwm register operands.
2367 addPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}));
2368 addPass(SILowerWWMCopiesPass());
2369 addPass(VirtRegRewriterPass(false));
2370 addPass(AMDGPUReserveWWMRegsPass());
2371
2372 // For allocating per-thread VGPRs.
2373 addPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}));
2374
2375
2376 addPreRewrite(addPass);
2377 addPass(VirtRegRewriterPass(true));
2378
2379 addPass(AMDGPUMarkLastScratchLoadPass());
2380 return Error::success();
2381}
2382
2383void AMDGPUCodeGenPassBuilder::addPostRegAlloc(AddMachinePass &addPass) const {
2384 addPass(SIFixVGPRCopiesPass());
2385 if (TM.getOptLevel() > CodeGenOptLevel::None)
2386 addPass(SIOptimizeExecMaskingPass());
2387 Base::addPostRegAlloc(addPass);
2388}
2389
2390void AMDGPUCodeGenPassBuilder::addPreSched2(AddMachinePass &addPass) const {
2391 if (TM.getOptLevel() > CodeGenOptLevel::None)
2392 addPass(SIShrinkInstructionsPass());
2393 addPass(SIPostRABundlerPass());
2394}
2395
2396void AMDGPUCodeGenPassBuilder::addPreEmitPass(AddMachinePass &addPass) const {
2397 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2398 addPass(GCNCreateVOPDPass());
2399 }
2400
2401 addPass(SIMemoryLegalizerPass());
2402 addPass(SIInsertWaitcntsPass());
2403
2404 // TODO: addPass(SIModeRegisterPass());
2405
2406 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2407 // TODO: addPass(SIInsertHardClausesPass());
2408 }
2409
2410 addPass(SILateBranchLoweringPass());
2411
2412 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2413 addPass(AMDGPUSetWavePriorityPass());
2414
2415 if (TM.getOptLevel() > CodeGenOptLevel::None)
2416 addPass(SIPreEmitPeepholePass());
2417
2418 // The hazard recognizer that runs as part of the post-ra scheduler does not
2419 // guarantee to be able to handle all hazards correctly. This is because if there
2420 // are multiple scheduling regions in a basic block, the regions are scheduled
2421 // bottom up, so when we begin to schedule a region we don't know what
2422 // instructions were emitted directly before it.
2423 //
2424 // Here we add a stand-alone hazard recognizer pass which can handle all
2425 // cases.
2426 addPass(PostRAHazardRecognizerPass());
2427 addPass(AMDGPUWaitSGPRHazardsPass());
2428 addPass(AMDGPULowerVGPREncodingPass());
2429
2430 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2431 addPass(AMDGPUInsertDelayAluPass());
2432 }
2433
2434 addPass(BranchRelaxationPass());
2435}
2436
2437bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2438 CodeGenOptLevel Level) const {
2439 if (Opt.getNumOccurrences())
2440 return Opt;
2441 if (TM.getOptLevel() < Level)
2442 return false;
2443 return Opt;
2444}
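Note (editorial worked example, not a listing line): here is how the resolution order above plays out for the EnableVOPD flag queried by addPreEmitPass, assuming the flag keeps its default of true and is gated on CodeGenOptLevel::Less there:

// isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)
//   at -O0                             -> false (opt level is below Less)
//   at -O1 and above                   -> true  (the flag's default value)
//   with -amdgpu-enable-vopd=0         -> false (an explicit option always wins)
//   with -amdgpu-enable-vopd=1 at -O0  -> true  (explicit option overrides the level check)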
2445
2446void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(AddIRPass &addPass) const {
2447 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2448 addPass(GVNPass());
2449 else
2450 addPass(EarlyCSEPass());
2451}
2452
2453void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2454 AddIRPass &addPass) const {
2455 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
2456 addPass(LoopDataPrefetchPass());
2457
2458 addPass(SeparateConstOffsetFromGEPPass());
2459
2460 // ReassociateGEPs exposes more opportunities for SLSR. See
2461 // the example in reassociate-geps-and-slsr.ll.
2462 addPass(StraightLineStrengthReducePass());
2463
2464 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
2465 // EarlyCSE can reuse.
2466 addEarlyCSEOrGVNPass(addPass);
2467
2468 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2469 addPass(NaryReassociatePass());
2470
2471 // NaryReassociate on GEPs creates redundant common expressions, so run
2472 // EarlyCSE after it.
2473 addPass(EarlyCSEPass());
2474}