LLVM 23.0.0git
AMDGPUTargetMachine.cpp
Go to the documentation of this file.
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file contains both AMDGPU target machine and the CodeGen pass builder.
11/// The AMDGPU target machine contains all of the hardware specific information
12/// needed to emit code for SI+ GPUs in the legacy pass manager pipeline. The
13/// CodeGen pass builder handles the pass pipeline for new pass manager.
14//
15//===----------------------------------------------------------------------===//
16
17#include "AMDGPUTargetMachine.h"
18#include "AMDGPU.h"
19#include "AMDGPUAliasAnalysis.h"
25#include "AMDGPUIGroupLP.h"
26#include "AMDGPUISelDAGToDAG.h"
28#include "AMDGPUMacroFusion.h"
35#include "AMDGPUSplitModule.h"
40#include "GCNDPPCombine.h"
42#include "GCNNSAReassign.h"
46#include "GCNSchedStrategy.h"
47#include "GCNVOPDUtils.h"
48#include "R600.h"
49#include "R600TargetMachine.h"
50#include "SIFixSGPRCopies.h"
51#include "SIFixVGPRCopies.h"
52#include "SIFoldOperands.h"
53#include "SIFormMemoryClauses.h"
55#include "SILowerControlFlow.h"
56#include "SILowerSGPRSpills.h"
57#include "SILowerWWMCopies.h"
59#include "SIMachineScheduler.h"
63#include "SIPeepholeSDWA.h"
64#include "SIPostRABundler.h"
67#include "SIWholeQuadMode.h"
88#include "llvm/CodeGen/Passes.h"
92#include "llvm/IR/IntrinsicsAMDGPU.h"
93#include "llvm/IR/PassManager.h"
102#include "llvm/Transforms/IPO.h"
127#include <optional>
128
129using namespace llvm;
130using namespace llvm::PatternMatch;
131
132namespace {
133//===----------------------------------------------------------------------===//
134// AMDGPU CodeGen Pass Builder interface.
135//===----------------------------------------------------------------------===//
136
137class AMDGPUCodeGenPassBuilder
138 : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine> {
139 using Base = CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, GCNTargetMachine>;
140
141public:
142 AMDGPUCodeGenPassBuilder(GCNTargetMachine &TM,
143 const CGPassBuilderOption &Opts,
144 PassInstrumentationCallbacks *PIC);
145
146 void addIRPasses(PassManagerWrapper &PMW) const;
147 void addCodeGenPrepare(PassManagerWrapper &PMW) const;
148 void addPreISel(PassManagerWrapper &PMW) const;
149 void addILPOpts(PassManagerWrapper &PMWM) const;
150 void addAsmPrinter(PassManagerWrapper &PMW, CreateMCStreamer) const;
151 Error addInstSelector(PassManagerWrapper &PMW) const;
152 void addPreRewrite(PassManagerWrapper &PMW) const;
153 void addMachineSSAOptimization(PassManagerWrapper &PMW) const;
154 void addPostRegAlloc(PassManagerWrapper &PMW) const;
155 void addPreEmitPass(PassManagerWrapper &PMWM) const;
156 void addPreEmitRegAlloc(PassManagerWrapper &PMW) const;
157 Error addRegAssignmentFast(PassManagerWrapper &PMW) const;
158 Error addRegAssignmentOptimized(PassManagerWrapper &PMW) const;
159 void addPreRegAlloc(PassManagerWrapper &PMW) const;
160 Error addFastRegAlloc(PassManagerWrapper &PMW) const;
161 void addOptimizedRegAlloc(PassManagerWrapper &PMW) const;
162 void addPreSched2(PassManagerWrapper &PMW) const;
163 void addPostBBSections(PassManagerWrapper &PMW) const;
164
165 /// Check if a pass is enabled given \p Opt option. The option always
166 /// overrides defaults if explicitly used. Otherwise its default will be used
167 /// given that a pass shall work at an optimization \p Level minimum.
168 bool isPassEnabled(const cl::opt<bool> &Opt,
169 CodeGenOptLevel Level = CodeGenOptLevel::Default) const;
170 void addEarlyCSEOrGVNPass(PassManagerWrapper &PMW) const;
171 void addStraightLineScalarOptimizationPasses(PassManagerWrapper &PMW) const;
172};
173
174class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
175public:
176 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
177 : RegisterRegAllocBase(N, D, C) {}
178};
179
180class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
181public:
182 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
183 : RegisterRegAllocBase(N, D, C) {}
184};
185
186class WWMRegisterRegAlloc : public RegisterRegAllocBase<WWMRegisterRegAlloc> {
187public:
188 WWMRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
189 : RegisterRegAllocBase(N, D, C) {}
190};
191
192static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
194 const Register Reg) {
195 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
196 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
197}
198
199static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
201 const Register Reg) {
202 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
203 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
204}
205
206static bool onlyAllocateWWMRegs(const TargetRegisterInfo &TRI,
208 const Register Reg) {
209 const SIMachineFunctionInfo *MFI =
210 MRI.getMF().getInfo<SIMachineFunctionInfo>();
211 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
212 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC) &&
214}
215
216/// -{sgpr|wwm|vgpr}-regalloc=... command line option.
217static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
218
219/// A dummy default pass factory indicates whether the register allocator is
220/// overridden on the command line.
221static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
222static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
223static llvm::once_flag InitializeDefaultWWMRegisterAllocatorFlag;
224
225static SGPRRegisterRegAlloc
226defaultSGPRRegAlloc("default",
227 "pick SGPR register allocator based on -O option",
229
230static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
232SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
233 cl::desc("Register allocator to use for SGPRs"));
234
235static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
237VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
238 cl::desc("Register allocator to use for VGPRs"));
239
240static cl::opt<WWMRegisterRegAlloc::FunctionPassCtor, false,
242 WWMRegAlloc("wwm-regalloc", cl::Hidden,
244 cl::desc("Register allocator to use for WWM registers"));
245
246static void initializeDefaultSGPRRegisterAllocatorOnce() {
247 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
248
249 if (!Ctor) {
250 Ctor = SGPRRegAlloc;
251 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
252 }
253}
254
255static void initializeDefaultVGPRRegisterAllocatorOnce() {
256 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
257
258 if (!Ctor) {
259 Ctor = VGPRRegAlloc;
260 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
261 }
262}
263
264static void initializeDefaultWWMRegisterAllocatorOnce() {
265 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
266
267 if (!Ctor) {
268 Ctor = WWMRegAlloc;
269 WWMRegisterRegAlloc::setDefault(WWMRegAlloc);
270 }
271}
272
273static FunctionPass *createBasicSGPRRegisterAllocator() {
274 return createBasicRegisterAllocator(onlyAllocateSGPRs);
275}
276
277static FunctionPass *createGreedySGPRRegisterAllocator() {
278 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
279}
280
281static FunctionPass *createFastSGPRRegisterAllocator() {
282 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
283}
284
285static FunctionPass *createBasicVGPRRegisterAllocator() {
286 return createBasicRegisterAllocator(onlyAllocateVGPRs);
287}
288
289static FunctionPass *createGreedyVGPRRegisterAllocator() {
290 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
291}
292
293static FunctionPass *createFastVGPRRegisterAllocator() {
294 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
295}
296
297static FunctionPass *createBasicWWMRegisterAllocator() {
298 return createBasicRegisterAllocator(onlyAllocateWWMRegs);
299}
300
301static FunctionPass *createGreedyWWMRegisterAllocator() {
302 return createGreedyRegisterAllocator(onlyAllocateWWMRegs);
303}
304
305static FunctionPass *createFastWWMRegisterAllocator() {
306 return createFastRegisterAllocator(onlyAllocateWWMRegs, false);
307}
308
309static SGPRRegisterRegAlloc basicRegAllocSGPR(
310 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
311static SGPRRegisterRegAlloc greedyRegAllocSGPR(
312 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
313
314static SGPRRegisterRegAlloc fastRegAllocSGPR(
315 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
316
317
318static VGPRRegisterRegAlloc basicRegAllocVGPR(
319 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
320static VGPRRegisterRegAlloc greedyRegAllocVGPR(
321 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
322
323static VGPRRegisterRegAlloc fastRegAllocVGPR(
324 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
325static WWMRegisterRegAlloc basicRegAllocWWMReg("basic",
326 "basic register allocator",
327 createBasicWWMRegisterAllocator);
328static WWMRegisterRegAlloc
329 greedyRegAllocWWMReg("greedy", "greedy register allocator",
330 createGreedyWWMRegisterAllocator);
331static WWMRegisterRegAlloc fastRegAllocWWMReg("fast", "fast register allocator",
332 createFastWWMRegisterAllocator);
333
337}
338} // anonymous namespace
339
340static cl::opt<bool>
342 cl::desc("Run early if-conversion"),
343 cl::init(false));
344
345static cl::opt<bool>
346OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
347 cl::desc("Run pre-RA exec mask optimizations"),
348 cl::init(true));
349
350static cl::opt<bool>
351 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
352 cl::desc("Lower GPU ctor / dtors to globals on the device."),
353 cl::init(true), cl::Hidden);
354
355// Option to disable vectorizer for tests.
357 "amdgpu-load-store-vectorizer",
358 cl::desc("Enable load store vectorizer"),
359 cl::init(true),
360 cl::Hidden);
361
362// Option to control global loads scalarization
364 "amdgpu-scalarize-global-loads",
365 cl::desc("Enable global load scalarization"),
366 cl::init(true),
367 cl::Hidden);
368
369// Option to run internalize pass.
371 "amdgpu-internalize-symbols",
372 cl::desc("Enable elimination of non-kernel functions and unused globals"),
373 cl::init(false),
374 cl::Hidden);
375
376// Option to inline all early.
378 "amdgpu-early-inline-all",
379 cl::desc("Inline all functions early"),
380 cl::init(false),
381 cl::Hidden);
382
384 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
385 cl::desc("Enable removal of functions when they"
386 "use features not supported by the target GPU"),
387 cl::init(true));
388
390 "amdgpu-sdwa-peephole",
391 cl::desc("Enable SDWA peepholer"),
392 cl::init(true));
393
395 "amdgpu-dpp-combine",
396 cl::desc("Enable DPP combiner"),
397 cl::init(true));
398
399// Enable address space based alias analysis
401 cl::desc("Enable AMDGPU Alias Analysis"),
402 cl::init(true));
403
404// Enable lib calls simplifications
406 "amdgpu-simplify-libcall",
407 cl::desc("Enable amdgpu library simplifications"),
408 cl::init(true),
409 cl::Hidden);
410
412 "amdgpu-ir-lower-kernel-arguments",
413 cl::desc("Lower kernel argument loads in IR pass"),
414 cl::init(true),
415 cl::Hidden);
416
418 "amdgpu-reassign-regs",
419 cl::desc("Enable register reassign optimizations on gfx10+"),
420 cl::init(true),
421 cl::Hidden);
422
424 "amdgpu-opt-vgpr-liverange",
425 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
426 cl::init(true), cl::Hidden);
427
429 "amdgpu-atomic-optimizer-strategy",
430 cl::desc("Select DPP or Iterative strategy for scan"),
433 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
435 "Use Iterative approach for scan"),
436 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
437
438// Enable Mode register optimization
440 "amdgpu-mode-register",
441 cl::desc("Enable mode register pass"),
442 cl::init(true),
443 cl::Hidden);
444
445// Enable GFX11+ s_delay_alu insertion
446static cl::opt<bool>
447 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
448 cl::desc("Enable s_delay_alu insertion"),
449 cl::init(true), cl::Hidden);
450
451// Enable GFX11+ VOPD
452static cl::opt<bool>
453 EnableVOPD("amdgpu-enable-vopd",
454 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
455 cl::init(true), cl::Hidden);
456
457// Option is used in lit tests to prevent deadcoding of patterns inspected.
458static cl::opt<bool>
459EnableDCEInRA("amdgpu-dce-in-ra",
460 cl::init(true), cl::Hidden,
461 cl::desc("Enable machine DCE inside regalloc"));
462
463static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
464 cl::desc("Adjust wave priority"),
465 cl::init(false), cl::Hidden);
466
468 "amdgpu-scalar-ir-passes",
469 cl::desc("Enable scalar IR passes"),
470 cl::init(true),
471 cl::Hidden);
472
474 "amdgpu-enable-lower-exec-sync",
475 cl::desc("Enable lowering of execution synchronization."), cl::init(true),
476 cl::Hidden);
477
478static cl::opt<bool>
479 EnableSwLowerLDS("amdgpu-enable-sw-lower-lds",
480 cl::desc("Enable lowering of lds to global memory pass "
481 "and asan instrument resulting IR."),
482 cl::init(true), cl::Hidden);
483
485 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
487 cl::Hidden);
488
490 "amdgpu-enable-pre-ra-optimizations",
491 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
492 cl::Hidden);
493
495 "amdgpu-enable-promote-kernel-arguments",
496 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
497 cl::Hidden, cl::init(true));
498
500 "amdgpu-enable-image-intrinsic-optimizer",
501 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
502 cl::Hidden);
503
504static cl::opt<bool>
505 EnableLoopPrefetch("amdgpu-loop-prefetch",
506 cl::desc("Enable loop data prefetch on AMDGPU"),
507 cl::Hidden, cl::init(false));
508
510 AMDGPUSchedStrategy("amdgpu-sched-strategy",
511 cl::desc("Select custom AMDGPU scheduling strategy."),
512 cl::Hidden, cl::init(""));
513
515 "amdgpu-enable-rewrite-partial-reg-uses",
516 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
517 cl::Hidden);
518
520 "amdgpu-enable-hipstdpar",
521 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
522 cl::Hidden);
523
524static cl::opt<bool>
525 EnableAMDGPUAttributor("amdgpu-attributor-enable",
526 cl::desc("Enable AMDGPUAttributorPass"),
527 cl::init(true), cl::Hidden);
528
530 "new-reg-bank-select",
531 cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of "
532 "regbankselect"),
533 cl::init(false), cl::Hidden);
534
536 "amdgpu-link-time-closed-world",
537 cl::desc("Whether has closed-world assumption at link time"),
538 cl::init(false), cl::Hidden);
539
541 "amdgpu-enable-uniform-intrinsic-combine",
542 cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
543 cl::init(true), cl::Hidden);
544
546 // Register the target
549
634}
635
636static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
637 return std::make_unique<AMDGPUTargetObjectFile>();
638}
639
643
644static ScheduleDAGInstrs *
646 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
647 ScheduleDAGMILive *DAG =
648 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
649 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
650 if (ST.shouldClusterStores())
651 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
653 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
654 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
655 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
656 return DAG;
657}
658
659static ScheduleDAGInstrs *
661 ScheduleDAGMILive *DAG =
662 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
664 return DAG;
665}
666
667static ScheduleDAGInstrs *
669 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
671 C, std::make_unique<GCNMaxMemoryClauseSchedStrategy>(C));
672 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
673 if (ST.shouldClusterStores())
674 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
675 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
676 DAG->addMutation(createAMDGPUBarrierLatencyDAGMutation(C->MF));
677 return DAG;
678}
679
680static ScheduleDAGInstrs *
682 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
683 auto *DAG = new GCNIterativeScheduler(
685 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
686 if (ST.shouldClusterStores())
687 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
689 return DAG;
690}
691
698
699static ScheduleDAGInstrs *
701 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
703 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
704 if (ST.shouldClusterStores())
705 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
706 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
708 return DAG;
709}
710
712SISchedRegistry("si", "Run SI's custom scheduler",
714
717 "Run GCN scheduler to maximize occupancy",
719
721 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
723
725 "gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause",
727
729 "gcn-iterative-max-occupancy-experimental",
730 "Run GCN scheduler to maximize occupancy (experimental)",
732
734 "gcn-iterative-minreg",
735 "Run GCN iterative scheduler for minimal register usage (experimental)",
737
739 "gcn-iterative-ilp",
740 "Run GCN iterative scheduler for ILP scheduling (experimental)",
742
745 if (!GPU.empty())
746 return GPU;
747
748 // Need to default to a target with flat support for HSA.
749 if (TT.isAMDGCN())
750 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
751
752 return "r600";
753}
754
756 // The AMDGPU toolchain only supports generating shared objects, so we
757 // must always use PIC.
758 return Reloc::PIC_;
759}
760
762 StringRef CPU, StringRef FS,
763 const TargetOptions &Options,
764 std::optional<Reloc::Model> RM,
765 std::optional<CodeModel::Model> CM,
768 T, TT.computeDataLayout(), TT, getGPUOrDefault(TT, CPU), FS, Options,
770 OptLevel),
772 initAsmInfo();
773 if (TT.isAMDGCN()) {
774 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
776 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
778 }
779}
780
783
785
787 Attribute GPUAttr = F.getFnAttribute("target-cpu");
788 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
789}
790
792 Attribute FSAttr = F.getFnAttribute("target-features");
793
794 return FSAttr.isValid() ? FSAttr.getValueAsString()
796}
797
800 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
802 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
803 if (ST.shouldClusterStores())
804 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
805 return DAG;
806}
807
808/// Predicate for Internalize pass.
809static bool mustPreserveGV(const GlobalValue &GV) {
810 if (const Function *F = dyn_cast<Function>(&GV))
811 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
812 F->getName().starts_with("__sanitizer_") ||
813 AMDGPU::isEntryFunctionCC(F->getCallingConv());
814
816 return !GV.use_empty();
817}
818
823
826 if (Params.empty())
828 Params.consume_front("strategy=");
829 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
830 .Case("dpp", ScanOptions::DPP)
831 .Cases({"iterative", ""}, ScanOptions::Iterative)
832 .Case("none", ScanOptions::None)
833 .Default(std::nullopt);
834 if (Result)
835 return *Result;
836 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
837}
838
842 while (!Params.empty()) {
843 StringRef ParamName;
844 std::tie(ParamName, Params) = Params.split(';');
845 if (ParamName == "closed-world") {
846 Result.IsClosedWorld = true;
847 } else {
849 formatv("invalid AMDGPUAttributor pass parameter '{0}' ", ParamName)
850 .str(),
852 }
853 }
854 return Result;
855}
856
858
859#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
861
862 PB.registerScalarOptimizerLateEPCallback(
863 [](FunctionPassManager &FPM, OptimizationLevel Level) {
864 if (Level == OptimizationLevel::O0)
865 return;
866
868 });
869
870 PB.registerVectorizerEndEPCallback(
871 [](FunctionPassManager &FPM, OptimizationLevel Level) {
872 if (Level == OptimizationLevel::O0)
873 return;
874
876 });
877
878 PB.registerPipelineEarlySimplificationEPCallback(
881 if (!isLTOPreLink(Phase)) {
882 // When we are not using -fgpu-rdc, we can run accelerator code
883 // selection relatively early, but still after linking to prevent
884 // eager removal of potentially reachable symbols.
885 if (EnableHipStdPar) {
888 }
890 }
891
892 if (Level == OptimizationLevel::O0)
893 return;
894
895 // We don't want to run internalization at per-module stage.
899 }
900
903 });
904
905 PB.registerPeepholeEPCallback(
906 [](FunctionPassManager &FPM, OptimizationLevel Level) {
907 if (Level == OptimizationLevel::O0)
908 return;
909
913
916 });
917
918 PB.registerCGSCCOptimizerLateEPCallback(
919 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
920 if (Level == OptimizationLevel::O0)
921 return;
922
924
925 // Add promote kernel arguments pass to the opt pipeline right before
926 // infer address spaces which is needed to do actual address space
927 // rewriting.
928 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
931
932 // Add infer address spaces pass to the opt pipeline after inlining
933 // but before SROA to increase SROA opportunities.
935
936 // This should run after inlining to have any chance of doing
937 // anything, and before other cleanup optimizations.
939
940 if (Level != OptimizationLevel::O0) {
941 // Promote alloca to vector before SROA and loop unroll. If we
942 // manage to eliminate allocas before unroll we may choose to unroll
943 // less.
945 }
946
947 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
948 });
949
950 // FIXME: Why is AMDGPUAttributor not in CGSCC?
951 PB.registerOptimizerLastEPCallback([this](ModulePassManager &MPM,
952 OptimizationLevel Level,
954 if (Level != OptimizationLevel::O0) {
955 if (!isLTOPreLink(Phase)) {
956 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
958 MPM.addPass(AMDGPUAttributorPass(*this, Opts, Phase));
959 }
960 }
961 }
962 });
963
964 PB.registerFullLinkTimeOptimizationLastEPCallback(
965 [this](ModulePassManager &PM, OptimizationLevel Level) {
966 // When we are using -fgpu-rdc, we can only run accelerator code
967 // selection after linking to prevent, otherwise we end up removing
968 // potentially reachable symbols that were exported as external in other
969 // modules.
970 if (EnableHipStdPar) {
973 }
974 // We want to support the -lto-partitions=N option as "best effort".
975 // For that, we need to lower LDS earlier in the pipeline before the
976 // module is partitioned for codegen.
980 PM.addPass(AMDGPUSwLowerLDSPass(*this));
983 if (Level != OptimizationLevel::O0) {
984 // We only want to run this with O2 or higher since inliner and SROA
985 // don't run in O1.
986 if (Level != OptimizationLevel::O1) {
987 PM.addPass(
989 }
990 // Do we really need internalization in LTO?
991 if (InternalizeSymbols) {
994 }
995 if (EnableAMDGPUAttributor && getTargetTriple().isAMDGCN()) {
998 Opt.IsClosedWorld = true;
1001 }
1002 }
1003 if (!NoKernelInfoEndLTO) {
1005 FPM.addPass(KernelInfoPrinter(this));
1006 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
1007 }
1008 });
1009
1010 PB.registerRegClassFilterParsingCallback(
1011 [](StringRef FilterName) -> RegAllocFilterFunc {
1012 if (FilterName == "sgpr")
1013 return onlyAllocateSGPRs;
1014 if (FilterName == "vgpr")
1015 return onlyAllocateVGPRs;
1016 if (FilterName == "wwm")
1017 return onlyAllocateWWMRegs;
1018 return nullptr;
1019 });
1020}
1021
1022int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
1023 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1024 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1025 AddrSpace == AMDGPUAS::REGION_ADDRESS)
1026 ? -1
1027 : 0;
1028}
1029
1031 unsigned DestAS) const {
1032 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
1034}
1035
1037 if (auto *Arg = dyn_cast<Argument>(V);
1038 Arg &&
1039 AMDGPU::isModuleEntryFunctionCC(Arg->getParent()->getCallingConv()) &&
1040 !Arg->hasByRefAttr())
1042
1043 const auto *LD = dyn_cast<LoadInst>(V);
1044 if (!LD) // TODO: Handle invariant load like constant.
1046
1047 // It must be a generic pointer loaded.
1048 assert(V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
1049
1050 const auto *Ptr = LD->getPointerOperand();
1051 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
1053 // For a generic pointer loaded from the constant memory, it could be assumed
1054 // as a global pointer since the constant memory is only populated on the
1055 // host side. As implied by the offload programming model, only global
1056 // pointers could be referenced on the host side.
1058}
1059
1060std::pair<const Value *, unsigned>
1062 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1063 switch (II->getIntrinsicID()) {
1064 case Intrinsic::amdgcn_is_shared:
1065 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
1066 case Intrinsic::amdgcn_is_private:
1067 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
1068 default:
1069 break;
1070 }
1071 return std::pair(nullptr, -1);
1072 }
1073 // Check the global pointer predication based on
1074 // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
1075 // the order of 'is_shared' and 'is_private' is not significant.
1076 Value *Ptr;
1077 if (match(
1078 const_cast<Value *>(V),
1081 m_Deferred(Ptr))))))
1082 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
1083
1084 return std::pair(nullptr, -1);
1085}
1086
1087unsigned
1102
1104 Module &M, unsigned NumParts,
1105 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
1106 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
1107 // but all current users of this API don't have one ready and would need to
1108 // create one anyway. Let's hide the boilerplate for now to keep it simple.
1109
1114
1115 PassBuilder PB(this);
1116 PB.registerModuleAnalyses(MAM);
1117 PB.registerFunctionAnalyses(FAM);
1118 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
1119
1121 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
1122 MPM.run(M, MAM);
1123 return true;
1124}
1125
1126//===----------------------------------------------------------------------===//
1127// GCN Target Machine (SI+)
1128//===----------------------------------------------------------------------===//
1129
1131 StringRef CPU, StringRef FS,
1132 const TargetOptions &Options,
1133 std::optional<Reloc::Model> RM,
1134 std::optional<CodeModel::Model> CM,
1135 CodeGenOptLevel OL, bool JIT)
1136 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
1137
1138const TargetSubtargetInfo *
1140 StringRef GPU = getGPUName(F);
1142
1143 SmallString<128> SubtargetKey(GPU);
1144 SubtargetKey.append(FS);
1145
1146 auto &I = SubtargetMap[SubtargetKey];
1147 if (!I) {
1148 // This needs to be done before we create a new subtarget since any
1149 // creation will depend on the TM and the code generation flags on the
1150 // function that reside in TargetOptions.
1152 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
1153 }
1154
1155 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
1156
1157 return I.get();
1158}
1159
1162 return TargetTransformInfo(std::make_unique<GCNTTIImpl>(this, F));
1163}
1164
1167 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
1169 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
1170 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
1171}
1172
1175 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1176 if (ST.enableSIScheduler())
1178
1179 Attribute SchedStrategyAttr =
1180 C->MF->getFunction().getFnAttribute("amdgpu-sched-strategy");
1181 StringRef SchedStrategy = SchedStrategyAttr.isValid()
1182 ? SchedStrategyAttr.getValueAsString()
1184
1185 if (SchedStrategy == "max-ilp")
1187
1188 if (SchedStrategy == "max-memory-clause")
1190
1191 if (SchedStrategy == "iterative-ilp")
1193
1194 if (SchedStrategy == "iterative-minreg")
1195 return createMinRegScheduler(C);
1196
1197 if (SchedStrategy == "iterative-maxocc")
1199
1201}
1202
1205 ScheduleDAGMI *DAG =
1206 new GCNPostScheduleDAGMILive(C, std::make_unique<PostGenericScheduler>(C),
1207 /*RemoveKillFlags=*/true);
1208 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1210 if (ST.shouldClusterStores())
1213 if ((EnableVOPD.getNumOccurrences() ||
1215 EnableVOPD)
1219 return DAG;
1220}
1221//===----------------------------------------------------------------------===//
1222// AMDGPU Legacy Pass Setup
1223//===----------------------------------------------------------------------===//
1224
1225std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
1226 return getStandardCSEConfigForOpt(TM->getOptLevel());
1227}
1228
1229namespace {
1230
1231class GCNPassConfig final : public AMDGPUPassConfig {
1232public:
1233 GCNPassConfig(TargetMachine &TM, PassManagerBase &PM)
1234 : AMDGPUPassConfig(TM, PM) {
1235 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
1236 }
1237
1238 GCNTargetMachine &getGCNTargetMachine() const {
1239 return getTM<GCNTargetMachine>();
1240 }
1241
1242 bool addPreISel() override;
1243 void addMachineSSAOptimization() override;
1244 bool addILPOpts() override;
1245 bool addInstSelector() override;
1246 bool addIRTranslator() override;
1247 void addPreLegalizeMachineIR() override;
1248 bool addLegalizeMachineIR() override;
1249 void addPreRegBankSelect() override;
1250 bool addRegBankSelect() override;
1251 void addPreGlobalInstructionSelect() override;
1252 bool addGlobalInstructionSelect() override;
1253 void addPreRegAlloc() override;
1254 void addFastRegAlloc() override;
1255 void addOptimizedRegAlloc() override;
1256
1257 FunctionPass *createSGPRAllocPass(bool Optimized);
1258 FunctionPass *createVGPRAllocPass(bool Optimized);
1259 FunctionPass *createWWMRegAllocPass(bool Optimized);
1260 FunctionPass *createRegAllocPass(bool Optimized) override;
1261
1262 bool addRegAssignAndRewriteFast() override;
1263 bool addRegAssignAndRewriteOptimized() override;
1264
1265 bool addPreRewrite() override;
1266 void addPostRegAlloc() override;
1267 void addPreSched2() override;
1268 void addPreEmitPass() override;
1269 void addPostBBSections() override;
1270};
1271
1272} // end anonymous namespace
1273
1275 : TargetPassConfig(TM, PM) {
1276 // Exceptions and StackMaps are not supported, so these passes will never do
1277 // anything.
1280 // Garbage collection is not supported.
1283}
1284
1291
1296 // ReassociateGEPs exposes more opportunities for SLSR. See
1297 // the example in reassociate-geps-and-slsr.ll.
1299 // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
1300 // EarlyCSE can reuse.
1302 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1304 // NaryReassociate on GEPs creates redundant common expressions, so run
1305 // EarlyCSE after it.
1307}
1308
1311
1312 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN())
1314
1315 // There is no reason to run these.
1319
1321 if (LowerCtorDtor)
1323
1324 if (TM.getTargetTriple().isAMDGCN() &&
1327
1330
1331 // This can be disabled by passing ::Disable here or on the command line
1332 // with --expand-variadics-override=disable.
1334
1335 // Function calls are not supported, so make sure we inline everything.
1338
1339 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1340 if (TM.getTargetTriple().getArch() == Triple::r600)
1342
1343 // Make enqueued block runtime handles externally visible.
1345
1346 // Lower special LDS accesses.
1349
1350 // Lower LDS accesses to global memory pass if address sanitizer is enabled.
1351 if (EnableSwLowerLDS)
1353
1354 // Runs before PromoteAlloca so the latter can account for function uses
1357 }
1358
1359 // Run atomic optimizer before Atomic Expand
1360 if ((TM.getTargetTriple().isAMDGCN()) &&
1361 (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
1364 }
1365
1367
1368 if (TM.getOptLevel() > CodeGenOptLevel::None) {
1370
1373
1377 AAResults &AAR) {
1378 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1379 AAR.addAAResult(WrapperPass->getResult());
1380 }));
1381 }
1382
1383 if (TM.getTargetTriple().isAMDGCN()) {
1384 // TODO: May want to move later or split into an early and late one.
1386 }
1387
1388 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1389 // have expanded.
1390 if (TM.getOptLevel() > CodeGenOptLevel::Less)
1392 }
1393
1395
1396 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1397 // example, GVN can combine
1398 //
1399 // %0 = add %a, %b
1400 // %1 = add %b, %a
1401 //
1402 // and
1403 //
1404 // %0 = shl nsw %a, 2
1405 // %1 = shl %a, 2
1406 //
1407 // but EarlyCSE can do neither of them.
1410}
1411
1413 if (TM->getTargetTriple().isAMDGCN() &&
1414 TM->getOptLevel() > CodeGenOptLevel::None)
1416
1417 if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments)
1419
1421
1424
1425 if (TM->getTargetTriple().isAMDGCN()) {
1426 // This lowering has been placed after codegenprepare to take advantage of
1427 // address mode matching (which is why it isn't put with the LDS lowerings).
1428 // It could be placed anywhere before uniformity annotations (an analysis
1429 // that it changes by splitting up fat pointers into their components)
1430 // but has been put before switch lowering and CFG flattening so that those
1431 // passes can run on the more optimized control flow this pass creates in
1432 // many cases.
1435 }
1436
1437 // LowerSwitch pass may introduce unreachable blocks that can
1438 // cause unexpected behavior for subsequent passes. Placing it
1439 // here seems better that these blocks would get cleaned up by
1440 // UnreachableBlockElim inserted next in the pass flow.
1442}
1443
1445 if (TM->getOptLevel() > CodeGenOptLevel::None)
1447 return false;
1448}
1449
1454
1456 // Do nothing. GC is not supported.
1457 return false;
1458}
1459
1460//===----------------------------------------------------------------------===//
1461// GCN Legacy Pass Setup
1462//===----------------------------------------------------------------------===//
1463
1464bool GCNPassConfig::addPreISel() {
1466
1467 if (TM->getOptLevel() > CodeGenOptLevel::None)
1468 addPass(createSinkingPass());
1469
1470 if (TM->getOptLevel() > CodeGenOptLevel::None)
1472
1473 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1474 // regions formed by them.
1476 addPass(createFixIrreduciblePass());
1477 addPass(createUnifyLoopExitsPass());
1478 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1479
1482 // TODO: Move this right after structurizeCFG to avoid extra divergence
1483 // analysis. This depends on stopping SIAnnotateControlFlow from making
1484 // control flow modifications.
1486
1487 // SDAG requires LCSSA, GlobalISel does not. Disable LCSSA for -global-isel
1488 // with -new-reg-bank-select and without any of the fallback options.
1490 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
1491 addPass(createLCSSAPass());
1492
1493 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1495
1496 return false;
1497}
1498
1499void GCNPassConfig::addMachineSSAOptimization() {
1501
1502 // We want to fold operands after PeepholeOptimizer has run (or as part of
1503 // it), because it will eliminate extra copies making it easier to fold the
1504 // real source operand. We want to eliminate dead instructions after, so that
1505 // we see fewer uses of the copies. We then need to clean up the dead
1506 // instructions leftover after the operands are folded as well.
1507 //
1508 // XXX - Can we get away without running DeadMachineInstructionElim again?
1509 addPass(&SIFoldOperandsLegacyID);
1510 if (EnableDPPCombine)
1511 addPass(&GCNDPPCombineLegacyID);
1513 if (isPassEnabled(EnableSDWAPeephole)) {
1514 addPass(&SIPeepholeSDWALegacyID);
1515 addPass(&EarlyMachineLICMID);
1516 addPass(&MachineCSELegacyID);
1517 addPass(&SIFoldOperandsLegacyID);
1518 }
1521}
1522
1523bool GCNPassConfig::addILPOpts() {
1525 addPass(&EarlyIfConverterLegacyID);
1526
1528 return false;
1529}
1530
1531bool GCNPassConfig::addInstSelector() {
1533 addPass(&SIFixSGPRCopiesLegacyID);
1535 return false;
1536}
1537
1538bool GCNPassConfig::addIRTranslator() {
1539 addPass(new IRTranslator(getOptLevel()));
1540 return false;
1541}
1542
1543void GCNPassConfig::addPreLegalizeMachineIR() {
1544 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1545 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1546 addPass(new Localizer());
1547}
1548
1549bool GCNPassConfig::addLegalizeMachineIR() {
1550 addPass(new Legalizer());
1551 return false;
1552}
1553
1554void GCNPassConfig::addPreRegBankSelect() {
1555 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1556 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1558}
1559
1560bool GCNPassConfig::addRegBankSelect() {
1561 if (NewRegBankSelect) {
1564 } else {
1565 addPass(new RegBankSelect());
1566 }
1567 return false;
1568}
1569
1570void GCNPassConfig::addPreGlobalInstructionSelect() {
1571 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1572 addPass(createAMDGPURegBankCombiner(IsOptNone));
1573}
1574
1575bool GCNPassConfig::addGlobalInstructionSelect() {
1576 addPass(new InstructionSelect(getOptLevel()));
1577 return false;
1578}
1579
1580void GCNPassConfig::addFastRegAlloc() {
1581 // FIXME: We have to disable the verifier here because of PHIElimination +
1582 // TwoAddressInstructions disabling it.
1583
1584 // This must be run immediately after phi elimination and before
1585 // TwoAddressInstructions, otherwise the processing of the tied operand of
1586 // SI_ELSE will introduce a copy of the tied operand source after the else.
1588
1590
1592}
1593
1594void GCNPassConfig::addPreRegAlloc() {
1595 if (getOptLevel() != CodeGenOptLevel::None)
1597}
1598
1599void GCNPassConfig::addOptimizedRegAlloc() {
1600 if (EnableDCEInRA)
1602
1603 // FIXME: when an instruction has a Killed operand, and the instruction is
1604 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1605 // the register in LiveVariables, this would trigger a failure in verifier,
1606 // we should fix it and enable the verifier.
1607 if (OptVGPRLiveRange)
1609
1610 // This must be run immediately after phi elimination and before
1611 // TwoAddressInstructions, otherwise the processing of the tied operand of
1612 // SI_ELSE will introduce a copy of the tied operand source after the else.
1614
1617
1618 if (isPassEnabled(EnablePreRAOptimizations))
1620
1621 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1622 // instructions that cause scheduling barriers.
1624
1625 if (OptExecMaskPreRA)
1627
1628 // This is not an essential optimization and it has a noticeable impact on
1629 // compilation time, so we only enable it from O2.
1630 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1632
1634}
1635
1636bool GCNPassConfig::addPreRewrite() {
1638 addPass(&GCNNSAReassignID);
1639
1641 return true;
1642}
1643
1644FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1645 // Initialize the global default.
1646 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1647 initializeDefaultSGPRRegisterAllocatorOnce);
1648
1649 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1650 if (Ctor != useDefaultRegisterAllocator)
1651 return Ctor();
1652
1653 if (Optimized)
1654 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1655
1656 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1657}
1658
1659FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1660 // Initialize the global default.
1661 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1662 initializeDefaultVGPRRegisterAllocatorOnce);
1663
1664 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1665 if (Ctor != useDefaultRegisterAllocator)
1666 return Ctor();
1667
1668 if (Optimized)
1669 return createGreedyVGPRRegisterAllocator();
1670
1671 return createFastVGPRRegisterAllocator();
1672}
1673
1674FunctionPass *GCNPassConfig::createWWMRegAllocPass(bool Optimized) {
1675 // Initialize the global default.
1676 llvm::call_once(InitializeDefaultWWMRegisterAllocatorFlag,
1677 initializeDefaultWWMRegisterAllocatorOnce);
1678
1679 RegisterRegAlloc::FunctionPassCtor Ctor = WWMRegisterRegAlloc::getDefault();
1680 if (Ctor != useDefaultRegisterAllocator)
1681 return Ctor();
1682
1683 if (Optimized)
1684 return createGreedyWWMRegisterAllocator();
1685
1686 return createFastWWMRegisterAllocator();
1687}
1688
1689FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1690 llvm_unreachable("should not be used");
1691}
1692
1694 "-regalloc not supported with amdgcn. Use -sgpr-regalloc, -wwm-regalloc, "
1695 "and -vgpr-regalloc";
1696
1697bool GCNPassConfig::addRegAssignAndRewriteFast() {
1698 if (!usingDefaultRegAlloc())
1700
1701 addPass(&GCNPreRALongBranchRegID);
1702
1703 addPass(createSGPRAllocPass(false));
1704
1705 // Equivalent of PEI for SGPRs.
1706 addPass(&SILowerSGPRSpillsLegacyID);
1707
1708 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1710
1711 // For allocating other wwm register operands.
1712 addPass(createWWMRegAllocPass(false));
1713
1714 addPass(&SILowerWWMCopiesLegacyID);
1716
1717 // For allocating per-thread VGPRs.
1718 addPass(createVGPRAllocPass(false));
1719
1720 return true;
1721}
1722
1723bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1724 if (!usingDefaultRegAlloc())
1726
1727 addPass(&GCNPreRALongBranchRegID);
1728
1729 addPass(createSGPRAllocPass(true));
1730
1731 // Commit allocated register changes. This is mostly necessary because too
1732 // many things rely on the use lists of the physical registers, such as the
1733 // verifier. This is only necessary with allocators which use LiveIntervals,
1734 // since FastRegAlloc does the replacements itself.
1735 addPass(createVirtRegRewriter(false));
1736
1737 // At this point, the sgpr-regalloc has been done and it is good to have the
1738 // stack slot coloring to try to optimize the SGPR spill stack indices before
1739 // attempting the custom SGPR spill lowering.
1740 addPass(&StackSlotColoringID);
1741
1742 // Equivalent of PEI for SGPRs.
1743 addPass(&SILowerSGPRSpillsLegacyID);
1744
1745 // To Allocate wwm registers used in whole quad mode operations (for shaders).
1747
1748 // For allocating other whole wave mode registers.
1749 addPass(createWWMRegAllocPass(true));
1750 addPass(&SILowerWWMCopiesLegacyID);
1751 addPass(createVirtRegRewriter(false));
1753
1754 // For allocating per-thread VGPRs.
1755 addPass(createVGPRAllocPass(true));
1756
1757 addPreRewrite();
1758 addPass(&VirtRegRewriterID);
1759
1761
1762 return true;
1763}
1764
1765void GCNPassConfig::addPostRegAlloc() {
1766 addPass(&SIFixVGPRCopiesID);
1767 if (getOptLevel() > CodeGenOptLevel::None)
1770}
1771
1772void GCNPassConfig::addPreSched2() {
1773 if (TM->getOptLevel() > CodeGenOptLevel::None)
1775 addPass(&SIPostRABundlerLegacyID);
1776}
1777
1778void GCNPassConfig::addPreEmitPass() {
1779 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1780 addPass(&GCNCreateVOPDID);
1781 addPass(createSIMemoryLegalizerPass());
1782 addPass(createSIInsertWaitcntsPass());
1783
1784 addPass(createSIModeRegisterPass());
1785
1786 if (getOptLevel() > CodeGenOptLevel::None)
1787 addPass(&SIInsertHardClausesID);
1788
1790 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1792 if (getOptLevel() > CodeGenOptLevel::None)
1793 addPass(&SIPreEmitPeepholeID);
1794 // The hazard recognizer that runs as part of the post-ra scheduler does not
1795 // guarantee to be able handle all hazards correctly. This is because if there
1796 // are multiple scheduling regions in a basic block, the regions are scheduled
1797 // bottom up, so when we begin to schedule a region we don't know what
1798 // instructions were emitted directly before it.
1799 //
1800 // Here we add a stand-alone hazard recognizer pass which can handle all
1801 // cases.
1802 addPass(&PostRAHazardRecognizerID);
1803
1805
1807
1808 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1809 addPass(&AMDGPUInsertDelayAluID);
1810
1811 addPass(&BranchRelaxationPassID);
1812}
1813
1814void GCNPassConfig::addPostBBSections() {
1815 // We run this later to avoid passes like livedebugvalues and BBSections
1816 // having to deal with the apparent multi-entry functions we may generate.
1818}
1819
1821 return new GCNPassConfig(*this, PM);
1822}
1823
1829
1836
1840
1847
1850 SMDiagnostic &Error, SMRange &SourceRange) const {
1851 const yaml::SIMachineFunctionInfo &YamlMFI =
1852 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1853 MachineFunction &MF = PFS.MF;
1855 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1856
1857 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1858 return true;
1859
1860 if (MFI->Occupancy == 0) {
1861 // Fixup the subtarget dependent default value.
1862 MFI->Occupancy = ST.getOccupancyWithWorkGroupSizes(MF).second;
1863 }
1864
1865 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1866 Register TempReg;
1867 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1868 SourceRange = RegName.SourceRange;
1869 return true;
1870 }
1871 RegVal = TempReg;
1872
1873 return false;
1874 };
1875
1876 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1877 Register &RegVal) {
1878 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1879 };
1880
1881 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1882 return true;
1883
1884 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1885 return true;
1886
1887 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1888 MFI->LongBranchReservedReg))
1889 return true;
1890
1891 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1892 // Create a diagnostic for a the register string literal.
1893 const MemoryBuffer &Buffer =
1894 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1895 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1896 RegName.Value.size(), SourceMgr::DK_Error,
1897 "incorrect register class for field", RegName.Value,
1898 {}, {});
1899 SourceRange = RegName.SourceRange;
1900 return true;
1901 };
1902
1903 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1904 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1905 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1906 return true;
1907
1908 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1909 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1910 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1911 }
1912
1913 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1914 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1915 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1916 }
1917
1918 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1919 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1920 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1921 }
1922
1923 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1924 Register ParsedReg;
1925 if (parseRegister(YamlReg, ParsedReg))
1926 return true;
1927
1928 MFI->reserveWWMRegister(ParsedReg);
1929 }
1930
1931 for (const auto &[_, Info] : PFS.VRegInfosNamed) {
1932 MFI->setFlag(Info->VReg, Info->Flags);
1933 }
1934 for (const auto &[_, Info] : PFS.VRegInfos) {
1935 MFI->setFlag(Info->VReg, Info->Flags);
1936 }
1937
1938 for (const auto &YamlRegStr : YamlMFI.SpillPhysVGPRS) {
1939 Register ParsedReg;
1940 if (parseRegister(YamlRegStr, ParsedReg))
1941 return true;
1942 MFI->SpillPhysVGPRs.push_back(ParsedReg);
1943 }
1944
1945 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1946 const TargetRegisterClass &RC,
1947 ArgDescriptor &Arg, unsigned UserSGPRs,
1948 unsigned SystemSGPRs) {
1949 // Skip parsing if it's not present.
1950 if (!A)
1951 return false;
1952
1953 if (A->IsRegister) {
1954 Register Reg;
1955 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1956 SourceRange = A->RegisterName.SourceRange;
1957 return true;
1958 }
1959 if (!RC.contains(Reg))
1960 return diagnoseRegisterClass(A->RegisterName);
1962 } else
1963 Arg = ArgDescriptor::createStack(A->StackOffset);
1964 // Check and apply the optional mask.
1965 if (A->Mask)
1966 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1967
1968 MFI->NumUserSGPRs += UserSGPRs;
1969 MFI->NumSystemSGPRs += SystemSGPRs;
1970 return false;
1971 };
1972
1973 if (YamlMFI.ArgInfo &&
1974 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1975 AMDGPU::SGPR_128RegClass,
1976 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1977 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1978 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1979 2, 0) ||
1980 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1981 MFI->ArgInfo.QueuePtr, 2, 0) ||
1982 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1983 AMDGPU::SReg_64RegClass,
1984 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1985 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1986 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1987 2, 0) ||
1988 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1989 AMDGPU::SReg_64RegClass,
1990 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1991 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1992 AMDGPU::SGPR_32RegClass,
1993 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1994 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1995 AMDGPU::SGPR_32RegClass,
1996 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1997 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1998 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1999 0, 1) ||
2000 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
2001 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
2002 0, 1) ||
2003 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
2004 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
2005 0, 1) ||
2006 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
2007 AMDGPU::SGPR_32RegClass,
2008 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
2009 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
2010 AMDGPU::SGPR_32RegClass,
2011 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
2012 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
2013 AMDGPU::SReg_64RegClass,
2014 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
2015 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
2016 AMDGPU::SReg_64RegClass,
2017 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
2018 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
2019 AMDGPU::VGPR_32RegClass,
2020 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
2021 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
2022 AMDGPU::VGPR_32RegClass,
2023 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
2024 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
2025 AMDGPU::VGPR_32RegClass,
2026 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
2027 return true;
2028
2029 // Parse FirstKernArgPreloadReg separately, since it's a Register,
2030 // not ArgDescriptor.
2031 if (YamlMFI.ArgInfo && YamlMFI.ArgInfo->FirstKernArgPreloadReg) {
2032 const yaml::SIArgument &A = *YamlMFI.ArgInfo->FirstKernArgPreloadReg;
2033
2034 if (!A.IsRegister) {
2035 // For stack arguments, we don't have RegisterName.SourceRange,
2036 // but we should have some location info from the YAML parser
2037 const MemoryBuffer &Buffer =
2038 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
2039 // Create a minimal valid source range
2041 SMRange Range(Loc, Loc);
2042
2044 *PFS.SM, Loc, Buffer.getBufferIdentifier(), 1, 0, SourceMgr::DK_Error,
2045 "firstKernArgPreloadReg must be a register, not a stack location", "",
2046 {}, {});
2047
2048 SourceRange = Range;
2049 return true;
2050 }
2051
2052 Register Reg;
2053 if (parseNamedRegisterReference(PFS, Reg, A.RegisterName.Value, Error)) {
2054 SourceRange = A.RegisterName.SourceRange;
2055 return true;
2056 }
2057
2058 if (!AMDGPU::SGPR_32RegClass.contains(Reg))
2059 return diagnoseRegisterClass(A.RegisterName);
2060
2061 MFI->ArgInfo.FirstKernArgPreloadReg = Reg;
2062 MFI->NumUserSGPRs += YamlMFI.NumKernargPreloadSGPRs;
2063 }
2064
2065 if (ST.hasIEEEMode())
2066 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
2067 if (ST.hasDX10ClampMode())
2068 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
2069
2070 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
2071 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
2074 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
2077
2084
2085 if (YamlMFI.HasInitWholeWave)
2086 MFI->setInitWholeWave();
2087
2088 return false;
2089}
2090
2091//===----------------------------------------------------------------------===//
2092// AMDGPU CodeGen Pass Builder interface.
2093//===----------------------------------------------------------------------===//
2094
2095AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
2096 GCNTargetMachine &TM, const CGPassBuilderOption &Opts,
2098 : CodeGenPassBuilder(TM, Opts, PIC) {
2099 Opt.MISchedPostRA = true;
2100 Opt.RequiresCodeGenSCCOrder = true;
2101 // Exceptions and StackMaps are not supported, so these passes will never do
2102 // anything.
2103 // Garbage collection is not supported.
2104 disablePass<StackMapLivenessPass, FuncletLayoutPass, PatchableFunctionPass,
2106}
2107
2108void AMDGPUCodeGenPassBuilder::addIRPasses(PassManagerWrapper &PMW) const {
2109 if (RemoveIncompatibleFunctions && TM.getTargetTriple().isAMDGCN()) {
2110 flushFPMsToMPM(PMW);
2111 addModulePass(AMDGPURemoveIncompatibleFunctionsPass(TM), PMW);
2112 }
2113
2114 flushFPMsToMPM(PMW);
2115 addModulePass(AMDGPUPrintfRuntimeBindingPass(), PMW);
2116 if (LowerCtorDtor)
2117 addModulePass(AMDGPUCtorDtorLoweringPass(), PMW);
2118
2119 if (isPassEnabled(EnableImageIntrinsicOptimizer))
2120 addFunctionPass(AMDGPUImageIntrinsicOptimizerPass(TM), PMW);
2121
2123 addFunctionPass(AMDGPUUniformIntrinsicCombinePass(), PMW);
2124 // This can be disabled by passing ::Disable here or on the command line
2125 // with --expand-variadics-override=disable.
2126 flushFPMsToMPM(PMW);
2128
2129 addModulePass(AMDGPUAlwaysInlinePass(), PMW);
2130 addModulePass(AlwaysInlinerPass(), PMW);
2131
2132 addModulePass(AMDGPUExportKernelRuntimeHandlesPass(), PMW);
2133
2135 addModulePass(AMDGPULowerExecSyncPass(), PMW);
2136
2137 if (EnableSwLowerLDS)
2138 addModulePass(AMDGPUSwLowerLDSPass(TM), PMW);
2139
2140 // Runs before PromoteAlloca so the latter can account for function uses
2142 addModulePass(AMDGPULowerModuleLDSPass(TM), PMW);
2143
2144 // Run atomic optimizer before Atomic Expand
2145 if (TM.getOptLevel() >= CodeGenOptLevel::Less &&
2147 addFunctionPass(
2149
2150 addFunctionPass(AtomicExpandPass(TM), PMW);
2151
2152 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2153 addFunctionPass(AMDGPUPromoteAllocaPass(TM), PMW);
2154 if (isPassEnabled(EnableScalarIRPasses))
2155 addStraightLineScalarOptimizationPasses(PMW);
2156
2157 // TODO: Handle EnableAMDGPUAliasAnalysis
2158
2159 // TODO: May want to move later or split into an early and late one.
2160 addFunctionPass(AMDGPUCodeGenPreparePass(TM), PMW);
2161
2162 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
2163 // have expanded.
2164 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2166 /*UseMemorySSA=*/true),
2167 PMW);
2168 }
2169 }
2170
2171 Base::addIRPasses(PMW);
2172
2173 // EarlyCSE is not always strong enough to clean up what LSR produces. For
2174 // example, GVN can combine
2175 //
2176 // %0 = add %a, %b
2177 // %1 = add %b, %a
2178 //
2179 // and
2180 //
2181 // %0 = shl nsw %a, 2
2182 // %1 = shl %a, 2
2183 //
2184 // but EarlyCSE can do neither of them.
2185 if (isPassEnabled(EnableScalarIRPasses))
2186 addEarlyCSEOrGVNPass(PMW);
2187}
2188
2189void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(
2190 PassManagerWrapper &PMW) const {
2191 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2192 flushFPMsToMPM(PMW);
2193 addModulePass(AMDGPUPreloadKernelArgumentsPass(TM), PMW);
2194 }
2195
2196 if (EnableLowerKernelArguments)
2197 addFunctionPass(AMDGPULowerKernelArgumentsPass(TM), PMW);
2198
2199 Base::addCodeGenPrepare(PMW);
2200
2201 if (isPassEnabled(EnableLoadStoreVectorizer))
2202 addFunctionPass(LoadStoreVectorizerPass(), PMW);
2203
2204 // This lowering has been placed after codegenprepare to take advantage of
2205 // address mode matching (which is why it isn't put with the LDS lowerings).
2206 // It could be placed anywhere before uniformity annotations (an analysis
2207 // that it changes by splitting up fat pointers into their components)
2208 // but has been put before switch lowering and CFG flattening so that those
2209 // passes can run on the more optimized control flow this pass creates in
2210 // many cases.
2211 flushFPMsToMPM(PMW);
2212 addModulePass(AMDGPULowerBufferFatPointersPass(TM), PMW);
2213 flushFPMsToMPM(PMW);
2214 requireCGSCCOrder(PMW);
2215
2216 addModulePass(AMDGPULowerIntrinsicsPass(TM), PMW);
2217
2218 // LowerSwitch pass may introduce unreachable blocks that can cause unexpected
2219 // behavior for subsequent passes. Placing it here seems better, as these
2220 // blocks will get cleaned up by UnreachableBlockElim, which is inserted
2221 // next in the pass flow.
2222 addFunctionPass(LowerSwitchPass(), PMW);
2223}
2224
2225void AMDGPUCodeGenPassBuilder::addPreISel(PassManagerWrapper &PMW) const {
2226
2227 // Require AMDGPUArgumentUsageAnalysis so that it's available during ISel.
2228 flushFPMsToMPM(PMW);
2229 addModulePass(RequireAnalysisPass<AMDGPUArgumentUsageAnalysis, Module>(),
2230 PMW);
2231
2232 if (TM.getOptLevel() > CodeGenOptLevel::None) {
2233 addFunctionPass(FlattenCFGPass(), PMW);
2234 addFunctionPass(SinkingPass(), PMW);
2235 addFunctionPass(AMDGPULateCodeGenPreparePass(TM), PMW);
2236 }
2237
2238 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
2239 // regions formed by them.
2240
2241 addFunctionPass(AMDGPUUnifyDivergentExitNodesPass(), PMW);
2242 addFunctionPass(FixIrreduciblePass(), PMW);
2243 addFunctionPass(UnifyLoopExitsPass(), PMW);
2244 addFunctionPass(StructurizeCFGPass(/*SkipUniformRegions=*/false), PMW);
2245
2246 addFunctionPass(AMDGPUAnnotateUniformValuesPass(), PMW);
2247
2248 addFunctionPass(SIAnnotateControlFlowPass(TM), PMW);
2249
2250 // TODO: Move this right after structurizeCFG to avoid extra divergence
2251 // analysis. This depends on stopping SIAnnotateControlFlow from making
2252 // control flow modifications.
2253 addFunctionPass(AMDGPURewriteUndefForPHIPass(), PMW);
2254
2255 if (EnableGlobalISelOption != cl::BOU_TRUE ||
2256 !isGlobalISelAbortEnabled() || !NewRegBankSelect)
2257 addFunctionPass(LCSSAPass(), PMW);
2258
2259 if (TM.getOptLevel() > CodeGenOptLevel::Less) {
2260 flushFPMsToMPM(PMW);
2261 addModulePass(AMDGPUPerfHintAnalysisPass(TM), PMW);
2262 }
2263
2264 // FIXME: Why isn't this queried as required from AMDGPUISelDAGToDAG, and why
2265 // isn't this in addInstSelector?
2266 addFunctionPass(RequireAnalysisPass<UniformityInfoAnalysis, Function>(), PMW,
2267 /*Force=*/true);
2268}
2269
2270void AMDGPUCodeGenPassBuilder::addILPOpts(PassManagerWrapper &PMW) const {
2271 if (EnableEarlyIfConversion)
2272 addMachineFunctionPass(EarlyIfConverterPass(), PMW);
2273
2274 Base::addILPOpts(PMW);
2275}
2276
2277void AMDGPUCodeGenPassBuilder::addAsmPrinter(PassManagerWrapper &PMW,
2278 CreateMCStreamer) const {
2279 // TODO: Add AsmPrinter.
2280}
2281
2282Error AMDGPUCodeGenPassBuilder::addInstSelector(PassManagerWrapper &PMW) const {
2283 addMachineFunctionPass(AMDGPUISelDAGToDAGPass(TM), PMW);
2284 addMachineFunctionPass(SIFixSGPRCopiesPass(), PMW);
2285 addMachineFunctionPass(SILowerI1CopiesPass(), PMW);
2286 return Error::success();
2287}
2288
2289void AMDGPUCodeGenPassBuilder::addPreRewrite(PassManagerWrapper &PMW) const {
2290 if (EnableRegReassign) {
2291 addMachineFunctionPass(GCNNSAReassignPass(), PMW);
2292 }
2293
2294 addMachineFunctionPass(AMDGPURewriteAGPRCopyMFMAPass(), PMW);
2295}
2296
2297void AMDGPUCodeGenPassBuilder::addMachineSSAOptimization(
2298 PassManagerWrapper &PMW) const {
2299 Base::addMachineSSAOptimization(PMW);
2300
2301 addMachineFunctionPass(SIFoldOperandsPass(), PMW);
2302 if (EnableDPPCombine) {
2303 addMachineFunctionPass(GCNDPPCombinePass(), PMW);
2304 }
2305 addMachineFunctionPass(SILoadStoreOptimizerPass(), PMW);
2306 if (isPassEnabled(EnableSDWAPeephole)) {
2307 addMachineFunctionPass(SIPeepholeSDWAPass(), PMW);
2308 addMachineFunctionPass(EarlyMachineLICMPass(), PMW);
2309 addMachineFunctionPass(MachineCSEPass(), PMW);
2310 addMachineFunctionPass(SIFoldOperandsPass(), PMW);
2311 }
2312 addMachineFunctionPass(DeadMachineInstructionElimPass(), PMW);
2313 addMachineFunctionPass(SIShrinkInstructionsPass(), PMW);
2314}
2315
2316Error AMDGPUCodeGenPassBuilder::addFastRegAlloc(PassManagerWrapper &PMW) const {
2317 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2318
2319 insertPass<TwoAddressInstructionPass>(SIWholeQuadModePass());
2320
2321 return Base::addFastRegAlloc(PMW);
2322}
2323
2324Error AMDGPUCodeGenPassBuilder::addRegAssignmentFast(
2325 PassManagerWrapper &PMW) const {
2326 // TODO: handle default regalloc override error (with regalloc-npm)
2327
2328 addMachineFunctionPass(GCNPreRALongBranchRegPass(), PMW);
2329
2330 addMachineFunctionPass(RegAllocFastPass({onlyAllocateSGPRs, "sgpr", false}),
2331 PMW);
2332
2333 // Equivalent of PEI for SGPRs.
2334 addMachineFunctionPass(SILowerSGPRSpillsPass(), PMW);
2335
2336 // Allocate WWM registers used in whole quad mode operations (for shaders).
2337 addMachineFunctionPass(SIPreAllocateWWMRegsPass(), PMW);
2338
2339 // For allocating other wwm register operands.
2340 addMachineFunctionPass(RegAllocFastPass({onlyAllocateWWMRegs, "wwm", false}),
2341 PMW);
2342
2343 addMachineFunctionPass(SILowerWWMCopiesPass(), PMW);
2344 addMachineFunctionPass(AMDGPUReserveWWMRegsPass(), PMW);
2345
2346 // For allocating per-thread VGPRs.
2347 addMachineFunctionPass(RegAllocFastPass({onlyAllocateVGPRs, "vgpr"}), PMW);
2348
2349 return Error::success();
2350}
2351
2352void AMDGPUCodeGenPassBuilder::addOptimizedRegAlloc(
2353 PassManagerWrapper &PMW) const {
2354 if (EnableDCEInRA)
2355 insertPass<DetectDeadLanesPass>(DeadMachineInstructionElimPass());
2356
2357 // FIXME: When an instruction has a killed operand and the instruction is
2358 // inside a bundle, it seems that only the BUNDLE instruction appears as the
2359 // kill of the register in LiveVariables. This triggers a failure in the
2360 // verifier; we should fix it and enable the verifier.
2361 if (OptVGPRLiveRange)
2362 insertPass<RequireAnalysisPass<LiveVariablesAnalysis, MachineFunction>>(
2363 SIOptimizeVGPRLiveRangePass());
2364
2365 // This must be run immediately after phi elimination and before
2366 // TwoAddressInstructions, otherwise the processing of the tied operand of
2367 // SI_ELSE will introduce a copy of the tied operand source after the else.
2368 insertPass<PHIEliminationPass>(SILowerControlFlowPass());
2369
2370 if (EnableRewritePartialRegUses)
2371 insertPass<RenameIndependentSubregsPass>(GCNRewritePartialRegUsesPass());
2372
2373 if (isPassEnabled(EnablePreRAOptimizations))
2374 insertPass<MachineSchedulerPass>(GCNPreRAOptimizationsPass());
2375
2376 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
2377 // instructions that cause scheduling barriers.
2378 insertPass<MachineSchedulerPass>(SIWholeQuadModePass());
2379
2380 if (OptExecMaskPreRA)
2381 insertPass<MachineSchedulerPass>(SIOptimizeExecMaskingPreRAPass());
2382
2383 // This is not an essential optimization and it has a noticeable impact on
2384 // compilation time, so we only enable it from O2.
2385 if (TM.getOptLevel() > CodeGenOptLevel::Less)
2386 insertPass<MachineSchedulerPass>(SIFormMemoryClausesPass());
2387
2388 Base::addOptimizedRegAlloc(PMW);
2389}
2390
2391void AMDGPUCodeGenPassBuilder::addPreRegAlloc(PassManagerWrapper &PMW) const {
2392 if (getOptLevel() != CodeGenOptLevel::None)
2393 addMachineFunctionPass(AMDGPUPrepareAGPRAllocPass(), PMW);
2394}
2395
2396Error AMDGPUCodeGenPassBuilder::addRegAssignmentOptimized(
2397 PassManagerWrapper &PMW) const {
2398 // TODO: Check --regalloc-npm option
2399
2400 addMachineFunctionPass(GCNPreRALongBranchRegPass(), PMW);
2401
2402 addMachineFunctionPass(RAGreedyPass({onlyAllocateSGPRs, "sgpr"}), PMW);
2403
2404 // Commit allocated register changes. This is mostly necessary because too
2405 // many things rely on the use lists of the physical registers, such as the
2406 // verifier. This is only necessary with allocators which use LiveIntervals,
2407 // since FastRegAlloc does the replacements itself.
2408 addMachineFunctionPass(VirtRegRewriterPass(false), PMW);
2409
2410 // At this point, the sgpr-regalloc has been done and it is good to have the
2411 // stack slot coloring to try to optimize the SGPR spill stack indices before
2412 // attempting the custom SGPR spill lowering.
2413 addMachineFunctionPass(StackSlotColoringPass(), PMW);
2414
2415 // Equivalent of PEI for SGPRs.
2416 addMachineFunctionPass(SILowerSGPRSpillsPass(), PMW);
2417
2418 // Allocate WWM registers used in whole quad mode operations (for shaders).
2419 addMachineFunctionPass(SIPreAllocateWWMRegsPass(), PMW);
2420
2421 // For allocating other wwm register operands.
2422 addMachineFunctionPass(RAGreedyPass({onlyAllocateWWMRegs, "wwm"}), PMW);
2423 addMachineFunctionPass(SILowerWWMCopiesPass(), PMW);
2424 addMachineFunctionPass(VirtRegRewriterPass(false), PMW);
2425 addMachineFunctionPass(AMDGPUReserveWWMRegsPass(), PMW);
2426
2427 // For allocating per-thread VGPRs.
2428 addMachineFunctionPass(RAGreedyPass({onlyAllocateVGPRs, "vgpr"}), PMW);
2429
2430 addPreRewrite(PMW);
2431 addMachineFunctionPass(VirtRegRewriterPass(true), PMW);
2432
2433 addMachineFunctionPass(AMDGPUMarkLastScratchLoadPass(), PMW);
2434 return Error::success();
2435}
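// A minimal illustrative sketch of the filter-function idea used by the
// staged allocation above: each RAGreedyPass run is restricted by a
// RegAllocFilterFunc such as onlyAllocateSGPRs so that SGPRs, WWM registers,
// and per-thread VGPRs are assigned in separate passes. The body below is an
// assumption for illustration (built on SIRegisterInfo::isSGPRClass), not a
// verbatim copy of the upstream helper:
//
//   static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
//                                 const MachineRegisterInfo &MRI,
//                                 const Register Reg) {
//     // Hand this virtual register to the allocator only if it lives in a
//     // scalar (SGPR) class; vector and WWM vregs are left for later runs.
//     const TargetRegisterClass *RC = MRI.getRegClass(Reg);
//     return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
//   }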
2436
2437void AMDGPUCodeGenPassBuilder::addPostRegAlloc(PassManagerWrapper &PMW) const {
2438 addMachineFunctionPass(SIFixVGPRCopiesPass(), PMW);
2439 if (TM.getOptLevel() > CodeGenOptLevel::None)
2440 addMachineFunctionPass(SIOptimizeExecMaskingPass(), PMW);
2441 Base::addPostRegAlloc(PMW);
2442}
2443
2444void AMDGPUCodeGenPassBuilder::addPreSched2(PassManagerWrapper &PMW) const {
2445 if (TM.getOptLevel() > CodeGenOptLevel::None)
2446 addMachineFunctionPass(SIShrinkInstructionsPass(), PMW);
2447 addMachineFunctionPass(SIPostRABundlerPass(), PMW);
2448}
2449
2450void AMDGPUCodeGenPassBuilder::addPostBBSections(
2451 PassManagerWrapper &PMW) const {
2452 // We run this later to avoid passes like livedebugvalues and BBSections
2453 // having to deal with the apparent multi-entry functions we may generate.
2454 addMachineFunctionPass(AMDGPUPreloadKernArgPrologPass(), PMW);
2455}
2456
2457void AMDGPUCodeGenPassBuilder::addPreEmitPass(PassManagerWrapper &PMW) const {
2458 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less)) {
2459 addMachineFunctionPass(GCNCreateVOPDPass(), PMW);
2460 }
2461
2462 addMachineFunctionPass(SIMemoryLegalizerPass(), PMW);
2463 addMachineFunctionPass(SIInsertWaitcntsPass(), PMW);
2464
2465 addMachineFunctionPass(SIModeRegisterPass(), PMW);
2466
2467 if (TM.getOptLevel() > CodeGenOptLevel::None)
2468 addMachineFunctionPass(SIInsertHardClausesPass(), PMW);
2469
2470 addMachineFunctionPass(SILateBranchLoweringPass(), PMW);
2471
2472 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
2473 addMachineFunctionPass(AMDGPUSetWavePriorityPass(), PMW);
2474
2475 if (TM.getOptLevel() > CodeGenOptLevel::None)
2476 addMachineFunctionPass(SIPreEmitPeepholePass(), PMW);
2477
2478 // The hazard recognizer that runs as part of the post-ra scheduler does not
2479 // guarantee to be able to handle all hazards correctly. This is because if there
2480 // are multiple scheduling regions in a basic block, the regions are scheduled
2481 // bottom up, so when we begin to schedule a region we don't know what
2482 // instructions were emitted directly before it.
2483 //
2484 // Here we add a stand-alone hazard recognizer pass which can handle all
2485 // cases.
2486 addMachineFunctionPass(PostRAHazardRecognizerPass(), PMW);
2487 addMachineFunctionPass(AMDGPUWaitSGPRHazardsPass(), PMW);
2488 addMachineFunctionPass(AMDGPULowerVGPREncodingPass(), PMW);
2489
2490 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less)) {
2491 addMachineFunctionPass(AMDGPUInsertDelayAluPass(), PMW);
2492 }
2493
2494 addMachineFunctionPass(BranchRelaxationPass(), PMW);
2495}
2496
2497bool AMDGPUCodeGenPassBuilder::isPassEnabled(const cl::opt<bool> &Opt,
2498 CodeGenOptLevel Level) const {
2499 if (Opt.getNumOccurrences())
2500 return Opt;
2501 if (TM.getOptLevel() < Level)
2502 return false;
2503 return Opt;
2504}
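// Worked example of the gating above, using the defaults declared in this
// file: isPassEnabled(EnableVOPD, CodeGenOptLevel::Less) returns true at -O1
// and higher (EnableVOPD defaults to true), returns false at -O0, and an
// explicit -amdgpu-enable-vopd=0/1 on the command line takes precedence over
// the optimization-level check entirely.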
2505
2506void AMDGPUCodeGenPassBuilder::addEarlyCSEOrGVNPass(
2507 PassManagerWrapper &PMW) const {
2508 if (TM.getOptLevel() == CodeGenOptLevel::Aggressive)
2509 addFunctionPass(GVNPass(), PMW);
2510 else
2511 addFunctionPass(EarlyCSEPass(), PMW);
2512}
2513
2514void AMDGPUCodeGenPassBuilder::addStraightLineScalarOptimizationPasses(
2515 PassManagerWrapper &PMW) const {
2516 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
2517 addFunctionPass(LoopDataPrefetchPass(), PMW);
2518
2519 addFunctionPass(SeparateConstOffsetFromGEPPass(), PMW);
2520
2521 // ReassociateGEPs exposes more opportunities for SLSR. See
2522 // the example in reassociate-geps-and-slsr.ll.
2523 addFunctionPass(StraightLineStrengthReducePass(), PMW);
2524
2525 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
2526 // EarlyCSE can reuse.
2527 addEarlyCSEOrGVNPass(PMW);
2528
2529 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
2530 addFunctionPass(NaryReassociatePass(), PMW);
2531
2532 // NaryReassociate on GEPs creates redundant common expressions, so run
2533 // EarlyCSE after it.
2534 addFunctionPass(EarlyCSEPass(), PMW);
2535}
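// A rough usage sketch (assumed driver-side boilerplate, not code from this
// file): the pass builder defined above is reached through
// GCNTargetMachine::buildCodeGenPipeline, roughly along these lines, where
// M, MAM, and Out are a module, an analysis manager with registered
// analyses, and an output stream set up by the caller:
//
//   ModulePassManager MPM;
//   PassInstrumentationCallbacks PIC;
//   if (Error Err = TM.buildCodeGenPipeline(MPM, Out, /*DwoOut=*/nullptr,
//                                           CodeGenFileType::AssemblyFile,
//                                           getCGPassBuilderOption(), &PIC))
//     reportFatalUsageError(std::move(Err));
//   MPM.run(M, MAM);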
unsigned const MachineRegisterInfo * MRI
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
static std::unique_ptr< TargetLoweringObjectFile > createTLOF(const Triple &TT)
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
Analyzes if a function potentially memory bound and if a kernel kernel may benefit from limiting numb...
Analyzes how many registers and other resources are used by functions.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxMemoryClauseSchedRegistry("gcn-max-memory-clause", "Run GCN scheduler to maximize memory clause", createGCNMaxMemoryClauseMachineScheduler)
static Reloc::Model getEffectiveRelocModel()
static cl::opt< bool > EnableUniformIntrinsicCombine("amdgpu-enable-uniform-intrinsic-combine", cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableSwLowerLDS("amdgpu-enable-sw-lower-lds", cl::desc("Enable lowering of lds to global memory pass " "and asan instrument resulting IR."), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > HasClosedWorldAssumption("amdgpu-link-time-closed-world", cl::desc("Whether has closed-world assumption at link time"), cl::init(false), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxMemoryClauseMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< std::string > AMDGPUSchedStrategy("amdgpu-sched-strategy", cl::desc("Select custom AMDGPU scheduling strategy."), cl::Hidden, cl::init(""))
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAttributor("amdgpu-attributor-enable", cl::desc("Enable AMDGPUAttributorPass"), cl::init(true), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
Expected< AMDGPUAttributorOptions > parseAMDGPUAttributorPassOptions(StringRef Params)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
static cl::opt< bool > NewRegBankSelect("new-reg-bank-select", cl::desc("Run amdgpu-regbankselect and amdgpu-regbanklegalize instead of " "regbankselect"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLowerExecSync("amdgpu-enable-lower-exec-sync", cl::desc("Enable lowering of execution synchronization."), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfoImplBase conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
Interfaces for producing common pass manager configurations.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_READNONE
Definition Compiler.h:315
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
DXIL Legalizer
This file provides the interface for a simple, fast CSE pass.
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
#define _
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
This header defines various interfaces for pass management in LLVM.
#define RegName(no)
This file provides the interface for LLVM's Loop Data Prefetching Pass.
This header provides classes for managing a pipeline of passes over loops in LLVM IR.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
CGSCCAnalysisManager CGAM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
static bool isLTOPreLink(ThinOrFullLTOPhase Phase)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
SI Machine Scheduler interface.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
static cl::opt< cl::boolOrDefault > EnableGlobalISelOption("global-isel", cl::Hidden, cl::desc("Enable the \"global\" instruction selector"))
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(TargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
std::unique_ptr< TargetLoweringObjectFile > TLOF
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
Inlines functions marked as "always_inline".
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:103
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:259
This class provides access to building LLVM's passes.
CodeGenTargetMachineImpl(const Target &T, StringRef DataLayoutString, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOptLevel OL)
LLVM_ABI void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
static ErrorSuccess success()
Create a success value.
Definition Error.h:336
Tagged union holding either a T or a Error.
Definition Error.h:485
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LowerIntrinsics - This pass rewrites calls to the llvm.gcread or llvm.gcwrite intrinsics,...
Definition GCMetadata.h:229
const SIRegisterInfo * getRegisterInfo() const override
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override
Similar to createMachineScheduler but used when postRA machine scheduling is enabled.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
The core GVN pass object.
Definition GVN.h:128
Pass to remove unused function declarations.
Definition GlobalDCE.h:38
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition Internalize.h:37
Converts loops into loop-closed SSA form.
Definition LCSSA.h:38
Performs Loop Invariant Code Motion Pass.
Definition LICM.h:66
This pass implements the localization mechanism described at the top of this file.
Definition Localizer.h:43
An optimization pass inserting data prefetches in loops.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
const char * getBufferStart() const
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static LLVM_ABI const OptimizationLevel O0
Disable as many optimizations as possible.
static LLVM_ABI const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
This class manages callbacks registration, as well as provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
void setFlag(Register Reg, uint8_t Flag)
bool checkFlag(Register Reg, uint8_t Flag) const
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition SourceMgr.h:297
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
Represents a range in source code.
Definition SMLoc.h:47
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
const TargetRegisterInfo * TRI
Target processor register info.
Move instructions into successor blocks when possible.
Definition Sink.h:24
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition SmallString.h:68
unsigned getMainFileID() const
Definition SourceMgr.h:148
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition SourceMgr.h:141
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
bool consume_front(char Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition StringRef.h:637
A switch()-like statement whose cases are string literals.
StringSwitch & Cases(std::initializer_list< StringLiteral > CaseStrings, T Value)
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
TargetOptions Options
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
CodeGenOptLevel OptLevel
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetPassConfig(TargetMachine &TM, PassManagerBase &PM)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
LLVM Value Representation.
Definition Value.h:75
bool use_empty() const
Definition Value.h:346
int getNumOccurrences() const
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
LLVM_READNONE constexpr bool isModuleEntryFunctionCC(CallingConv::ID CC)
LLVM_READNONE constexpr bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
template class LLVM_TEMPLATE_ABI opt< bool >
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
LLVM_ABI FunctionPass * createFlattenCFGPass()
std::unique_ptr< ScheduleDAGMutation > createAMDGPUBarrierLatencyDAGMutation(MachineFunction *MF)
LLVM_ABI FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
LLVM_ABI char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
LLVM_ABI char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
LLVM_ABI Pass * createLCSSAPass()
Definition LCSSA.cpp:525
void initializeAMDGPUMarkLastScratchLoadLegacyPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluLegacyPass(PassRegistry &)
void initializeSIOptimizeExecMaskingPreRALegacyPass(PassRegistry &)
char & GCNPreRAOptimizationsID
LLVM_ABI char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeSIInsertHardClausesLegacyPass(PassRegistry &)
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
FunctionPass * createSIAnnotateControlFlowLegacyPass()
Create the annotation pass.
FunctionPass * createSIModeRegisterPass()
void initializeGCNPreRAOptimizationsLegacyPass(PassRegistry &)
void initializeSILowerWWMCopiesLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeSIShrinkInstructionsLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeGCNRewritePartialRegUsesLegacyPass(llvm::PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
char & GCNRewritePartialRegUsesID
void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &)
LLVM_ABI std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition Error.cpp:94
void initializeAMDGPULowerVGPREncodingLegacyPass(PassRegistry &)
char & AMDGPUWaitSGPRHazardsLegacyID
void initializeSILowerSGPRSpillsLegacyPass(PassRegistry &)
LLVM_ABI Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifes whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
LLVM_ABI FunctionPass * createNaryReassociatePass()
char & AMDGPUReserveWWMRegsLegacyID
void initializeAMDGPUWaitSGPRHazardsLegacyPass(PassRegistry &)
LLVM_ABI char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & SIOptimizeExecMaskingLegacyID
LLVM_ABI char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUExportKernelRuntimeHandlesLegacyPass()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeAMDGPUAsmPrinterPass(PassRegistry &)
void initializeSIFoldOperandsLegacyPass(PassRegistry &)
char & SILoadStoreOptimizerLegacyID
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
PassManager< LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &, CGSCCUpdateResult & > CGSCCPassManager
The CGSCC pass manager.
LLVM_ABI std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition CSEInfo.cpp:85
Target & getTheR600Target()
The target for R600 GPUs.
LLVM_ABI char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
LLVM_ABI Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structizer will not structurize regions that only contain uniform...
LLVM_ABI char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
LLVM_ABI Pass * createLICMPass()
Definition LICM.cpp:386
char & SIFormMemoryClausesID
void initializeSILoadStoreOptimizerLegacyPass(PassRegistry &)
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
AnalysisManager< LazyCallGraph::SCC, LazyCallGraph & > CGSCCAnalysisManager
The CGSCC analysis manager.
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
LLVM_ABI char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
AnalysisManager< Loop, LoopStandardAnalysisResults & > LoopAnalysisManager
The loop analysis manager.
FunctionPass * createAMDGPUUniformIntrinsicCombineLegacyPass()
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition Pass.h:77
@ FullLTOPreLink
Full LTO prelink phase.
Definition Pass.h:85
@ FullLTOPostLink
Full LTO postlink (backend compile) phase.
Definition Pass.h:87
@ ThinLTOPreLink
ThinLTO prelink (summary) phase.
Definition Pass.h:81
char & AMDGPUUnifyDivergentExitNodesID
void initializeAMDGPUPrepareAGPRAllocLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
FunctionPass * createAMDGPUPreloadKernArgPrologLegacyPass()
char & SIOptimizeVGPRLiveRangeLegacyID
LLVM_ABI char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
void initializeSIModeRegisterLegacyPass(PassRegistry &)
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
void initializeAMDGPUPreloadKernelArgumentsLegacyPass(PassRegistry &)
char & SILateBranchLoweringPassID
FunctionToLoopPassAdaptor createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA=false)
A function to deduce a loop pass type and wrap it in the templated adaptor.
LLVM_ABI char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
LLVM_ABI FunctionPass * createSinkingPass()
Definition Sink.cpp:275
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
void initializeSIMemoryLegalizerLegacyPass(PassRegistry &)
ModulePass * createAMDGPULowerIntrinsicsLegacyPass()
void initializeR600MachineCFGStructurizerPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition CodeGen.h:111
char & GCNDPPCombineLegacyID
PassManager< Module > ModulePassManager
Convenience typedef for a pass manager over modules.
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUTargetMach...
LLVM_ABI char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
void initializeGCNPreRALongBranchRegLegacyPass(PassRegistry &)
char & SILowerWWMCopiesLegacyID
LLVM_ABI FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
LLVM_ABI FunctionPass * createFixIrreduciblePass()
void initializeR600EmitClauseMarkersPass(PassRegistry &)
LLVM_ABI char & FuncletLayoutID
This pass lays out funclets contiguously.
LLVM_ABI char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPULowerExecSyncLegacyPass(PassRegistry &)
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
void initializeAMDGPUExportKernelRuntimeHandlesLegacyPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
void initializeSIInsertWaitcntsLegacyPass(PassRegistry &)
ModulePass * createAMDGPUPreloadKernelArgumentsLegacyPass(const TargetMachine *)
ModulePass * createAMDGPUPrintfRuntimeBinding()
LLVM_ABI char & StackSlotColoringID
StackSlotColoring - This pass performs stack slot coloring.
LLVM_ABI Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringLegacyPass(PassRegistry &)
void initializeSILowerControlFlowLegacyPass(PassRegistry &)
void initializeSIFormMemoryClausesLegacyPass(PassRegistry &)
char & SIPreAllocateWWMRegsLegacyID
Error make_error(ArgTs &&... Args)
Make a Error instance representing failure using the given error info type.
Definition Error.h:340
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
void initializeAMDGPUArgumentUsageInfoWrapperLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
void initializeAMDGPUReserveWWMRegsLegacyPass(PassRegistry &)
char & SIPreEmitPeepholeID
char & SIPostRABundlerLegacyID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeSILowerI1CopiesLegacyPass(PassRegistry &)
char & SILowerSGPRSpillsLegacyID
LLVM_ABI FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
LLVM_ABI void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
char & SILowerControlFlowLegacyID
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
void initializeSIAnnotateControlFlowLegacyPass(PassRegistry &)
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into a AMDGPU-specific.
void initializeGCNCreateVOPDLegacyPass(PassRegistry &)
void initializeAMDGPUUniformIntrinsicCombineLegacyPass(PassRegistry &)
void initializeSIPreAllocateWWMRegsLegacyPass(PassRegistry &)
void initializeSIFixVGPRCopiesLegacyPass(PassRegistry &)
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeSIFixSGPRCopiesLegacyPass(PassRegistry &)
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
void initializeAMDGPULowerIntrinsicsLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
Definition GVN.cpp:3405
void initializeAMDGPURewriteAGPRCopyMFMALegacyPass(PassRegistry &)
void initializeSIPostRABundlerLegacyPass(PassRegistry &)
FunctionPass * createAMDGPURegBankSelectPass()
FunctionPass * createAMDGPURegBankLegalizePass()
LLVM_ABI char & MachineCSELegacyID
MachineCSE - This pass performs global CSE on machine instructions.
char & SIWholeQuadModeID
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
PassManager< Function > FunctionPassManager
Convenience typedef for a pass manager over functions.
LLVM_ABI char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is life and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void initializeSIOptimizeExecMaskingLegacyPass(PassRegistry &)
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition Threading.h:86
FunctionPass * createSILowerI1CopiesLegacyPass()
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
char & SIFixSGPRCopiesLegacyID
void initializeGCNDPPCombineLegacyPass(PassRegistry &)
char & GCNCreateVOPDID
char & SIPeepholeSDWALegacyID
LLVM_ABI char & VirtRegRewriterID
VirtRegRewriter pass.
char & SIFixVGPRCopiesID
char & SIFoldOperandsLegacyID
void initializeGCNNSAReassignLegacyPass(PassRegistry &)
char & AMDGPUPrepareAGPRAllocLegacyID
LLVM_ABI FunctionPass * createLowerSwitchPass()
void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
void initializeR600VectorRegMergerPass(PassRegistry &)
char & AMDGPURewriteAGPRCopyMFMALegacyID
ModulePass * createAMDGPULowerExecSyncLegacyPass()
char & AMDGPULowerVGPREncodingLegacyID
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeAMDGPULateCodeGenPrepareLegacyPass(PassRegistry &)
void initializeSIOptimizeVGPRLiveRangeLegacyPass(PassRegistry &)
void initializeSIPeepholeSDWALegacyPass(PassRegistry &)
void initializeAMDGPURegBankLegalizePass(PassRegistry &)
LLVM_ABI char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
LLVM_ABI FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls, or target-specific instructions which implement the same semantics in a way that better fits the target backend.
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
LLVM_ABI FunctionPass * createStraightLineStrengthReducePass()
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
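A minimal BumpPtrAllocator sketch:

#include "llvm/Support/Allocator.h"

void allocatorSketch() {
  llvm::BumpPtrAllocator Alloc;
  // Bump allocation has no per-object free; memory is reclaimed in bulk
  // when Alloc is destroyed or Reset() is called.
  int *Ints = Alloc.Allocate<int>(16);
  Ints[0] = 42;
  Alloc.Reset(); // release every allocation at once
}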
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createAMDGPUAnnotateUniformValuesLegacy()
LLVM_ABI FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
void initializeSIWholeQuadModeLegacyPass(PassRegistry &)
LLVM_ABI char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
LLVM_ABI llvm::cl::opt< bool > NoKernelInfoEndLTO
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
void initializeAMDGPUResourceUsageAnalysisWrapperPassPass(PassRegistry &)
FunctionPass * createSIShrinkInstructionsLegacyPass()
char & AMDGPUMarkLastScratchLoadID
LLVM_ABI char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes and splits them into separate virtual registers.
void initializeAMDGPUAnnotateUniformValuesLegacyPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(PassRegistry &)
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
LLVM_ABI char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
void initializeSIPreEmitPeepholeLegacyPass(PassRegistry &)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
char & AMDGPUPerfHintAnalysisLegacyID
LLVM_ABI ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperPass.
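A hedged sketch of the callback shape, as a target might use it to feed a custom alias-analysis result into AAResultsWrapperPass; PM is an in-scope legacy pass manager, and MyAAWrapper with its getResult() are hypothetical placeholder names:

PM.add(llvm::createExternalAAWrapperPass(
    [](llvm::Pass &P, llvm::Function &, llvm::AAResults &AAR) {
      // MyAAWrapper is a hypothetical ImmutablePass holding the AA result.
      if (auto *Wrapper = P.getAnalysisIfAvailable<MyAAWrapper>())
        AAR.addAAResult(Wrapper->getResult());
    }));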
char & GCNPreRALongBranchRegID
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating-point environment.
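A minimal sketch of DenormalMode from llvm/ADT/FloatingPointMode.h, pairing the Output and Input kinds above:

#include "llvm/ADT/FloatingPointMode.h"

// IEEE in both directions, and a flush-to-zero mode that preserves the
// sign of the flushed value.
llvm::DenormalMode KeepDenorms = llvm::DenormalMode::getIEEE();
llvm::DenormalMode FlushDenorms(llvm::DenormalMode::PreserveSign,  // Output
                                llvm::DenormalMode::PreserveSign); // Input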
A simple and fast domtree-based CSE pass.
Definition EarlyCSE.h:31
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-specific information for each MachineFunction.
static FuncInfoTy * create(BumpPtrAllocator &Allocator, const Function &F, const SubtargetTy *STI)
Factory function: default behavior is to call new using the supplied allocator.
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instantiate a scheduler.
StringMap< VRegInfo * > VRegInfosNamed
Definition MIParser.h:177
DenseMap< Register, VRegInfo * > VRegInfos
Definition MIParser.h:176
RegisterTargetMachine - Helper template for registering a target machine implementation, for use in the target machine initialization function.
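The canonical registration pattern is a sketch like the following, where FooTargetMachine and getTheFooTarget() are placeholder names:

extern "C" void LLVMInitializeFooTarget() {
  // Register the TargetMachine implementation for the Foo target.
  llvm::RegisterTargetMachine<FooTargetMachine> X(getTheFooTarget());
}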
A utility pass template to force an analysis result to be available.
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise, pass NaN through.
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions.
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs per IEEE 754-2008.
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition Threading.h:67
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
SmallVector< StringValue, 2 > SpillPhysVGPRS
A wrapper around std::string which contains a source range that's being set during parsing.