LLVM 23.0.0git
AArch64TargetMachine.cpp
Go to the documentation of this file.
1//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9//
10//===----------------------------------------------------------------------===//
11
13#include "AArch64.h"
16#include "AArch64MacroFusion.h"
17#include "AArch64Subtarget.h"
35#include "llvm/CodeGen/Passes.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/Function.h"
41#include "llvm/MC/MCAsmInfo.h"
44#include "llvm/Pass.h"
56#include <memory>
57
58using namespace llvm;
59
60static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
61 cl::desc("Enable the CCMP formation pass"),
62 cl::init(true), cl::Hidden);
63
64static cl::opt<bool>
65 EnableCondBrTuning("aarch64-enable-cond-br-tune",
66 cl::desc("Enable the conditional branch tuning pass"),
67 cl::init(true), cl::Hidden);
68
70 "aarch64-enable-copy-propagation",
71 cl::desc("Enable the copy propagation with AArch64 copy instr"),
72 cl::init(true), cl::Hidden);
73
74static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
75 cl::desc("Enable the machine combiner pass"),
76 cl::init(true), cl::Hidden);
77
78static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
79 cl::desc("Suppress STP for AArch64"),
80 cl::init(true), cl::Hidden);
81
83 "aarch64-enable-simd-scalar",
84 cl::desc("Enable use of AdvSIMD scalar integer instructions"),
85 cl::init(false), cl::Hidden);
86
87static cl::opt<bool>
88 EnablePromoteConstant("aarch64-enable-promote-const",
89 cl::desc("Enable the promote constant pass"),
90 cl::init(true), cl::Hidden);
91
93 "aarch64-enable-collect-loh",
94 cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
95 cl::init(true), cl::Hidden);
96
97static cl::opt<bool>
98 EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
99 cl::desc("Enable the pass that removes dead"
100 " definitions and replaces stores to"
101 " them with stores to the zero"
102 " register"),
103 cl::init(true));
104
106 "aarch64-enable-copyelim",
107 cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
108 cl::Hidden);
109
110static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
111 cl::desc("Enable the load/store pair"
112 " optimization pass"),
113 cl::init(true), cl::Hidden);
114
116 "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
117 cl::desc("Run SimplifyCFG after expanding atomic operations"
118 " to make use of cmpxchg flow-based information"),
119 cl::init(true));
120
121static cl::opt<bool>
122EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
123 cl::desc("Run early if-conversion"),
124 cl::init(true));
125
126static cl::opt<bool>
127 EnableCondOpt("aarch64-enable-condopt",
128 cl::desc("Enable the condition optimizer pass"),
129 cl::init(true), cl::Hidden);
130
131static cl::opt<bool>
132 EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
133 cl::desc("Enable optimizations on complex GEPs"),
134 cl::init(false));
135
136static cl::opt<bool>
137 EnableSelectOpt("aarch64-select-opt", cl::Hidden,
138 cl::desc("Enable select to branch optimizations"),
139 cl::init(true));
140
141static cl::opt<bool>
142 BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
143 cl::desc("Relax out of range conditional branches"));
144
146 "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
147 cl::desc("Use smallest entry possible for jump tables"));
148
149// FIXME: Unify control over GlobalMerge.
151 EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
152 cl::desc("Enable the global merge pass"));
153
154static cl::opt<bool>
155 EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
156 cl::desc("Enable the loop data prefetch pass"),
157 cl::init(true));
158
160 "aarch64-enable-global-isel-at-O", cl::Hidden,
161 cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
162 cl::init(0));
163
164static cl::opt<bool>
165 EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
166 cl::desc("Enable SVE intrinsic opts"),
167 cl::init(true));
168
169static cl::opt<bool>
170 EnableSMEPeepholeOpt("enable-aarch64-sme-peephole-opt", cl::init(true),
172 cl::desc("Perform SME peephole optimization"));
173
174static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
175 cl::init(true), cl::Hidden);
176
177static cl::opt<bool>
178 EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
179 cl::desc("Enable the AArch64 branch target pass"),
180 cl::init(true));
181
183 "aarch64-sve-vector-bits-max",
184 cl::desc("Assume SVE vector registers are at most this big, "
185 "with zero meaning no maximum size is assumed."),
186 cl::init(0), cl::Hidden);
187
189 "aarch64-sve-vector-bits-min",
190 cl::desc("Assume SVE vector registers are at least this big, "
191 "with zero meaning no minimum size is assumed."),
192 cl::init(0), cl::Hidden);
193
195 "force-streaming",
196 cl::desc("Force the use of streaming code for all functions"),
197 cl::init(false), cl::Hidden);
198
200 "force-streaming-compatible",
201 cl::desc("Force the use of streaming-compatible code for all functions"),
202 cl::init(false), cl::Hidden);
203
205
207 "aarch64-enable-gisel-ldst-prelegal",
208 cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"),
209 cl::init(true), cl::Hidden);
210
212 "aarch64-enable-gisel-ldst-postlegal",
213 cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"),
214 cl::init(false), cl::Hidden);
215
216static cl::opt<bool>
217 EnableSinkFold("aarch64-enable-sink-fold",
218 cl::desc("Enable sinking and folding of instruction copies"),
219 cl::init(true), cl::Hidden);
220
221static cl::opt<bool>
222 EnableMachinePipeliner("aarch64-enable-pipeliner",
223 cl::desc("Enable Machine Pipeliner for AArch64"),
224 cl::init(false), cl::Hidden);
225
227 "aarch64-srlt-mitigate-sr2r",
228 cl::desc("Enable SUBREG_TO_REG mitigation by adding 'implicit-def' for "
229 "super-regs when using Subreg Liveness Tracking"),
230 cl::init(true), cl::Hidden);
231
  // NOTE(review): this is the tail of LLVMInitializeAArch64Target(); the
  // extraction lost the function header (orig. 232-233), the
  // RegisterTargetMachine registrations (orig. 235-239) and the long run of
  // initialize*Pass(PR) calls (orig. 241-283). Restore from upstream before
  // building.
  // Register the target.
  auto &PR = *PassRegistry::getPassRegistry();
}
285
287
288//===----------------------------------------------------------------------===//
289// AArch64 Lowering public interface.
290//===----------------------------------------------------------------------===//
291static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
292 if (TT.isOSBinFormatMachO())
293 return std::make_unique<AArch64_MachoTargetObjectFile>();
294 if (TT.isOSBinFormatCOFF())
295 return std::make_unique<AArch64_COFFTargetObjectFile>();
296
297 return std::make_unique<AArch64_ELFTargetObjectFile>();
298}
299
301 if (CPU.empty() && TT.isArm64e())
302 return "apple-a12";
303 return CPU;
304}
305
307 std::optional<Reloc::Model> RM) {
308 // AArch64 Darwin and Windows are always PIC.
309 if (TT.isOSDarwin() || TT.isOSWindows())
310 return Reloc::PIC_;
311 // On ELF platforms the default static relocation model has a smart enough
312 // linker to cope with referencing external symbols defined in a shared
313 // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
314 if (!RM || *RM == Reloc::DynamicNoPIC)
315 return Reloc::Static;
316 return *RM;
317}
318
319static CodeModel::Model
321 std::optional<CodeModel::Model> CM, bool JIT) {
322 if (CM) {
323 if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
324 *CM != CodeModel::Large) {
326 "Only small, tiny and large code models are allowed on AArch64");
327 } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF()) {
328 report_fatal_error("tiny code model is only supported on ELF");
329 }
330 return *CM;
331 }
332 // The default MCJIT memory managers make no guarantees about where they can
333 // find an executable page; JITed code needs to be able to refer to globals
334 // no matter how far away they are.
335 // We should set the CodeModel::Small for Windows ARM64 in JIT mode,
336 // since with large code model LLVM generating 4 MOV instructions, and
337 // Windows doesn't support relocating these long branch (4 MOVs).
338 if (JIT && !TT.isOSWindows())
339 return CodeModel::Large;
340 return CodeModel::Small;
341}
342
/// Create an AArch64 architecture model.
///
// NOTE(review): the first signature line (orig. 345) was lost in extraction;
// judging by the le/be subclass constructors below it reads
// "AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple
// &TT,".
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           std::optional<Reloc::Model> RM,
                                           std::optional<CodeModel::Model> CM,
                                           CodeGenOptLevel OL, bool JIT,
                                           bool LittleEndian)
    : CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT,
                               computeDefaultCPU(TT, CPU), FS, Options,
                               // NOTE(review): orig. 354 lost -- presumably
                               // "getEffectiveRelocModel(TT, RM),".
                               getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  // Mach-O: trap on unreachable so fallthrough off a function never decodes
  // as something else.
  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo().usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  // Clamp the TLS size according to the code model's addressing reach.
  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       // NOTE(review): orig. 377 lost -- per the comment below it presumably
       // also tested "getCodeModel() == CodeModel::Kernel) &&".
       this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  const bool TargetSupportsGISel =
      TT.getArch() != Triple::aarch64_32 &&
      TT.getEnvironment() != Triple::GNUILP32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO());

  const bool GlobalISelFlag =
      // NOTE(review): the initializer (orig. 391-392) was lost in extraction.

  // Enable GlobalISel at or below EnableGlobalISelAt0, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (TargetSupportsGISel && EnableGlobalISelAtO != -1 &&
      (static_cast<int>(getOptLevel()) <= EnableGlobalISelAtO ||
       (!GlobalISelFlag && !Options.EnableGlobalISel))) {
    setGlobalISel(true);
    // NOTE(review): orig. 399 lost -- likely
    // "setGlobalISelAbort(GlobalISelAbortMode::Disable);".
  }

  // NOTE(review): orig. 402 lost (a stripped single call).

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  // NOTE(review): orig. 408 lost -- likely
  // "setSupportsDefaultOutlining(true);".

  // AArch64 supports the debug entry values.
  // NOTE(review): orig. 411 lost -- likely
  // "setSupportsDebugEntryValues(true);".

  // AArch64 supports fixing up the DWARF unwind information.
  if (!getMCAsmInfo().usesWindowsCFI())
    setCFIFixup(true);
}
417
421
423
const AArch64Subtarget *
// NOTE(review): the opening line (orig. 425) was lost in extraction; by LLVM
// convention it reads
// "AArch64TargetMachine::getSubtargetImpl(const Function &F) const {".
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  // Per-function attributes override the module-level TargetMachine defaults
  // when present.
  StringRef CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString() : TargetCPU;
  StringRef TuneCPU = TuneAttr.isValid() ? TuneAttr.getValueAsString() : CPU;
  StringRef FS = FSAttr.isValid() ? FSAttr.getValueAsString() : TargetFS;
  bool HasMinSize = F.hasMinSize();

  bool IsStreaming = ForceStreaming ||
                     F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
                     F.hasFnAttribute("aarch64_pstate_sm_body");
  bool IsStreamingCompatible = ForceStreamingCompatible ||
                               F.hasFnAttribute("aarch64_pstate_sm_compatible");

  // The vscale_range attribute (when present) wins over the
  // -aarch64-sve-vector-bits-{min,max} command-line bounds; both are expressed
  // here in bits (multiples of the 128-bit SVE granule).
  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  if (F.hasFnAttribute(Attribute::VScaleRange)) {
    ConstantRange CR = getVScaleRange(&F, 64);
    MinSVEVectorSize = CR.getUnsignedMin().getZExtValue() * 128;
    MaxSVEVectorSize = CR.getUnsignedMax().getZExtValue() * 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case of no asserts
  if (MaxSVEVectorSize != 0) {
    MinSVEVectorSize = std::min(MinSVEVectorSize, MaxSVEVectorSize);
    MaxSVEVectorSize = std::max(MinSVEVectorSize, MaxSVEVectorSize);
  }

  // NOTE(review): the declaration of Key (orig. 465, presumably a
  // SmallString) was lost in extraction.
  // This lookup is hot during repeated TTI queries, so build the key directly
  // instead of formatting through raw_svector_ostream.
  Key += "SVEMin";
  Key += utostr(MinSVEVectorSize);
  Key += "SVEMax";
  Key += utostr(MaxSVEVectorSize);
  Key += "IsStreaming=";
  Key += utostr(IsStreaming);
  Key += "IsStreamingCompatible=";
  Key += utostr(IsStreamingCompatible);
  Key += CPU;
  Key += TuneCPU;
  Key += FS;
  Key += "HasMinSize=";
  Key += utostr(HasMinSize);

  // Cache subtargets by the full key so functions sharing attributes share a
  // subtarget instance.
  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    // NOTE(review): orig. 487 lost -- presumably "resetTargetOptions(F);".
    I = std::make_unique<AArch64Subtarget>(
        TargetTriple, CPU, TuneCPU, FS, *this, isLittle, MinSVEVectorSize,
        MaxSVEVectorSize, IsStreaming, IsStreamingCompatible, HasMinSize,
        // NOTE(review): the trailing constructor argument(s) on orig. 491
        // were lost in extraction.
  }

  if (IsStreaming && !I->hasSME())
    reportFatalUsageError("streaming SVE functions require SME");

  return I.get();
}
499
  // NOTE(review): this is the body of createMachineScheduler(); the signature
  // (orig. 500-501) and the line creating DAG (orig. 503) were lost in
  // extraction.
  const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
  // Cluster adjacent loads/stores, and fuse macro-op pairs when the subtarget
  // supports fusion.
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.hasFusion())
    DAG->addMutation(createAArch64MacroFusionDAGMutation());
  return DAG;
}
510
  // NOTE(review): this is the body of createPostMachineScheduler(); the
  // signature (orig. 511-512) and the line creating DAG (orig. 514) were lost
  // in extraction.
  const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
  if (ST.hasFusion()) {
    // Run the Macro Fusion after RA again since literals are expanded from
    // pseudos then (v. addPreSched2()).
    DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  return DAG;
}
524
    // NOTE(review): the first signature line (orig. 525) was lost in
    // extraction; only the parameter-list continuation below is visible.
    const SmallPtrSetImpl<MachineInstr *> &MIs) const {
  // Nothing to clear for an empty set.
  if (MIs.empty())
    return 0;
  // All instructions in the set belong to the same function; delegate the
  // actual LOH bookkeeping to its AArch64FunctionInfo.
  auto *MI = *MIs.begin();
  auto *FuncInfo = MI->getMF()->getInfo<AArch64FunctionInfo>();
  return FuncInfo->clearLinkerOptimizationHints(MIs);
}
533
// Out-of-line anchor: pins the subclass vtable to this translation unit.
void AArch64leTargetMachine::anchor() { }
535
537 const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
538 const TargetOptions &Options, std::optional<Reloc::Model> RM,
539 std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
540 : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}
541
// Out-of-line anchor: pins the subclass vtable to this translation unit.
void AArch64beTargetMachine::anchor() { }
543
545 const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
546 const TargetOptions &Options, std::optional<Reloc::Model> RM,
547 std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
548 : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}
549
550namespace {
551
552/// AArch64 Code Generator Pass Configuration Options.
553class AArch64PassConfig : public TargetPassConfig {
554public:
555 AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
556 : TargetPassConfig(TM, PM) {
558 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
559 setEnableSinkAndFold(EnableSinkFold);
560 }
561
562 AArch64TargetMachine &getAArch64TargetMachine() const {
564 }
565
566 void addIRPasses() override;
567 bool addPreISel() override;
568 void addCodeGenPrepare() override;
569 bool addInstSelector() override;
570 bool addIRTranslator() override;
571 void addPreLegalizeMachineIR() override;
572 bool addLegalizeMachineIR() override;
573 void addPreRegBankSelect() override;
574 bool addRegBankSelect() override;
575 bool addGlobalInstructionSelect() override;
576 void addMachineSSAOptimization() override;
577 bool addILPOpts() override;
578 void addPreRegAlloc() override;
579 void addPostRewrite() override;
580 void addPostRegAlloc() override;
581 void addPreSched2() override;
582 void addPreEmitPass() override;
583 void addPostBBSections() override;
584 void addPreEmitPass2() override;
585 bool addRegAssignAndRewriteOptimized() override;
586
587 std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
588
589private:
590 bool isGlobalISelOptNone() const;
591};
592
593} // end anonymous namespace
594
596#define GET_PASS_REGISTRY "AArch64PassRegistry.def"
598
  // NOTE(review): this is the tail of registerPassBuilderCallbacks(PB); the
  // include of the generated pass registry and the function signature (orig.
  // 597-598) were lost in extraction.
  // Vectorize recognized loop idioms when optimizing.
  PB.registerLateLoopOptimizationsEPCallback(
      [=](LoopPassManager &LPM, OptimizationLevel Level) {
        if (Level != OptimizationLevel::O0)
          LPM.addPass(LoopIdiomVectorizePass());
      });
  if (getTargetTriple().isOSWindows())
    PB.registerPipelineEarlySimplificationEPCallback(
        // NOTE(review): the callback lambda (orig. 606-607) was lost in
        // extraction.
        });
}
610
613 return TargetTransformInfo(std::make_unique<AArch64TTIImpl>(this, F));
614}
615
617 return new AArch64PassConfig(*this, PM);
618}
619
620std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
621 return getStandardCSEConfigForOpt(TM->getOptLevel());
622}
623
624// This function checks whether the opt level is explicitly set to none,
625// or whether GlobalISel was enabled due to SDAG encountering an optnone
626// function. If the opt level is greater than the level we automatically enable
627// globalisel at, and it wasn't enabled via CLI, we know that it must be because
628// of an optnone function.
bool AArch64PassConfig::isGlobalISelOptNone() const {
  const bool GlobalISelFlag =
      // NOTE(review): the initializer (orig. 631-632) was lost in extraction.

  // -O0, or GlobalISel is on despite being above the auto-enable threshold
  // without the CLI flag -- i.e. it was enabled for an optnone function.
  return getOptLevel() == CodeGenOptLevel::None ||
         (static_cast<unsigned>(getOptLevel()) >
              getAArch64TargetMachine().getEnableGlobalISelAtO() &&
          !GlobalISelFlag);
}
638
void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  // NOTE(review): the addPass (orig. 642) was lost in extraction.

  // Expand any SVE vector library calls that we can't code generate directly.
  // NOTE(review): the guard opener (orig. 645) and guarded addPass (orig.
  // 647) were lost in extraction.
      TM->getOptLevel() != CodeGenOptLevel::None)

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAtomicTidy)
    // NOTE(review): the addPass opener (orig. 653) was lost in extraction;
    // the SimplifyCFG option chain below is its argument.
            .forwardSwitchCondToPhi(true)
            .convertSwitchRangeToICmp(true)
            .convertSwitchToLookupTable(true)
            .needCanonicalLoops(false)
            .hoistCommonInsts(true)
            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    // NOTE(review): the block contents (orig. 666-669) were lost in
    // extraction.
  }

  if (EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    // NOTE(review): the addPass (orig. 676) was lost in extraction.
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  // NOTE(review): orig. 685 lost (likely the call into the base class,
  // TargetPassConfig::addIRPasses()).

  if (getOptLevel() == CodeGenOptLevel::Aggressive && EnableSelectOpt)
    addPass(createSelectOptimizePass());

  // NOTE(review): the addPass opener (orig. 690) was lost in extraction; the
  // IsOptNone argument below belongs to it.
                 /*IsOptNone=*/TM->getOptLevel() == CodeGenOptLevel::None));

  // Match complex arithmetic patterns
  if (TM->getOptLevel() >= CodeGenOptLevel::Default)
    // NOTE(review): the guarded addPass (orig. 695) was lost in extraction.

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    // NOTE(review): block contents (orig. 699-700) were lost in extraction.
  }

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows()) {
    if (TM->getTargetTriple().isWindowsArm64EC())
      // NOTE(review): the Arm64EC branch (orig. 706) was lost in extraction.
    else
      addPass(createCFGuardPass());
  }

  if (TM->Options.JMCInstrument)
    addPass(createJMCInstrumenterPass());
}
714
715// Pass Pipeline Configuration
// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnablePromoteConstant)
    // NOTE(review): the guarded addPass (orig. 720) was lost in extraction.
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes().
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       // NOTE(review): the rest of the condition (orig. 725-726, presumably
       // the EnableGlobalMerge boolOrDefault tests) was lost in extraction.
    bool OnlyOptimizeForSize =
        (TM->getOptLevel() < CodeGenOptLevel::Aggressive) &&
        // NOTE(review): the second conjunct (orig. 729) was lost in
        // extraction.

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}
742
void AArch64PassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    // NOTE(review): the guarded addPass and the call into
    // TargetPassConfig::addCodeGenPrepare() (orig. 745-746) were lost in
    // extraction.
}
748
bool AArch64PassConfig::addInstSelector() {
  // SelectionDAG instruction selection.
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible.
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOptLevel::None)
    // NOTE(review): the guarded addPass (orig. 756) was lost in extraction --
    // presumably the local-dynamic TLS cleanup pass.

  return false;
}
760
761bool AArch64PassConfig::addIRTranslator() {
762 addPass(new IRTranslator(getOptLevel()));
763 return false;
764}
765
void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (isGlobalISelOptNone()) {
    // NOTE(review): the first addPass of the optnone path (orig. 768,
    // presumably the O0 pre-legalizer combiner) was lost in extraction.
    addPass(new Localizer());
  } else {
    // NOTE(review): orig. 771 (presumably the full pre-legalizer combiner)
    // and orig. 773 (presumably "if (EnableGISelLoadStoreOptPreLegal)") were
    // lost in extraction.
    addPass(new Localizer());
    addPass(new LoadStoreOpt());
  }
}
777
778bool AArch64PassConfig::addLegalizeMachineIR() {
779 addPass(new Legalizer());
780 return false;
781}
782
void AArch64PassConfig::addPreRegBankSelect() {
  if (!isGlobalISelOptNone()) {
    addPass(createAArch64PostLegalizerCombiner(isGlobalISelOptNone()));
    // NOTE(review): the guard (orig. 786, presumably
    // "if (EnableGISelLoadStoreOptPostLegal)") was lost in extraction.
    addPass(new LoadStoreOpt());
  }
  // NOTE(review): orig. 789 was lost in extraction (blank or a stripped
  // call).
}
791
792bool AArch64PassConfig::addRegBankSelect() {
793 addPass(new RegBankSelect());
794 return false;
795}
796
bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (!isGlobalISelOptNone())
    // NOTE(review): the guarded addPass (orig. 800, presumably the
    // post-select optimize pass) was lost in extraction.
  return false;
}
803
void AArch64PassConfig::addMachineSSAOptimization() {
  // Lower the SME ABI before the generic machine-SSA optimizations when
  // optimizing.
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createMachineSMEABIPass(TM->getOptLevel()));

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableSMEPeepholeOpt)
    addPass(createSMEPeepholeOptPass());

  // Run default MachineSSAOptimization first.
  // NOTE(review): orig. 812 lost -- presumably
  // "TargetPassConfig::addMachineSSAOptimization();".

  if (TM->getOptLevel() != CodeGenOptLevel::None)
    // NOTE(review): the guarded addPass (orig. 815) was lost in extraction.
}
817
bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    // NOTE(review): the guarded addPass (orig. 820) was lost in extraction.
  if (EnableCCMP)
    // NOTE(review): the guarded addPass (orig. 822) was lost in extraction.
  if (EnableMCR)
    addPass(&MachineCombinerID);
  // NOTE(review): the guard (orig. 825, presumably
  // "if (EnableCondBrTuning)") was lost in extraction.
    addPass(createAArch64CondBrTuning());
  // NOTE(review): the guard (orig. 827, presumably
  // "if (EnableEarlyIfConversion)") was lost in extraction.
    addPass(&EarlyIfConverterLegacyID);
  // NOTE(review): orig. 829-831 lost (presumably the EnableStPairSuppress
  // guarded pass).
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    // NOTE(review): the guarded addPass (orig. 833) was lost in extraction.
  return true;
}
836
void AArch64PassConfig::addPreRegAlloc() {
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    // NOTE(review): the guarded statement (orig. 839) was lost in extraction.

  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      // NOTE(review): orig. 843-844 lost -- presumably the
      // EnableDeadRegisterElimination guard and its addPass.

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAdvSIMDScalar) {
    // NOTE(review): the AdvSIMD scalar addPass (orig. 848) was lost in
    // extraction.
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    // NOTE(review): the follow-up addPass (orig. 851) was lost in extraction.
  }
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMachinePipeliner)
    addPass(&MachinePipelinerID);
}
856
void AArch64PassConfig::addPostRewrite() {
  // NOTE(review): the body (orig. 858-859) was lost in extraction; restore
  // from upstream before building.
}
861
void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      // NOTE(review): orig. 865-866 lost -- presumably the
      // EnableRedundantCopyElimination guard and its addPass.

  if (TM->getOptLevel() != CodeGenOptLevel::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    // NOTE(review): the guarded addPass (orig. 870) was lost in extraction.
}
872
void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions
  // NOTE(review): the pass additions (orig. 875-876) were lost in extraction.
  // Expand some pseudo instructions to allow proper scheduling.
  // NOTE(review): the addPass (orig. 878) was lost in extraction.
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    // NOTE(review): block contents (orig. 881-882) were lost in extraction.
  }
  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  // NOTE(review): the addPass (orig. 892) was lost in extraction.

  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    // NOTE(review): the guard (orig. 895, presumably
    // "if (EnableFalkorHWPFFix)") was lost in extraction.
    addPass(createFalkorHWPFFixPass());
  }
}
899
void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive && EnableLoadStoreOpt)
    // NOTE(review): the guarded addPass (orig. 905) was lost in extraction.

  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive &&
      // NOTE(review): rest of the condition and its addPass (orig. 908-909)
      // were lost in extraction.
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    // NOTE(review): the guarded addPass (orig. 911) was lost in extraction.

  // NOTE(review): an unconditional statement (orig. 913) was lost in
  // extraction.

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    // NOTE(review): the addPass (orig. 919) was lost in extraction.
  }

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    // NOTE(review): the guarded addPass (orig. 924) was lost in extraction.

  // Apply code layout optimizations. Run late so detection reflects the
  // final MI stream.
  if (getOptLevel() != CodeGenOptLevel::None)
    // NOTE(review): the guarded addPass (orig. 929) was lost in extraction.
}
931
void AArch64PassConfig::addPostBBSections() {
  // NOTE(review): several pass additions (orig. 933-936) were lost in
  // extraction.
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  // NOTE(review): the guard (orig. 939, presumably "if (BranchRelaxation)")
  // was lost in extraction.
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCompressJumpTables)
    // NOTE(review): the guarded addPass (orig. 943) was lost in extraction.
}
945
946void AArch64PassConfig::addPreEmitPass2() {
947 // Insert pseudo probe annotation for callsite profiling
948 addPass(createPseudoProbeInserter());
949
950 // SVE bundles move prefixes with destructive operations. BLR_RVMARKER pseudo
951 // instructions are lowered to bundles as well.
952 addPass(createUnpackMachineBundlesLegacy(nullptr));
953}
954
bool AArch64PassConfig::addRegAssignAndRewriteOptimized() {
  // NOTE(review): the body (orig. 956-957) was lost in extraction --
  // presumably an AArch64-specific pass followed by a tail call to
  // TargetPassConfig::addRegAssignAndRewriteOptimized(). Restore from
  // upstream before building.
}
959
966
971
974 const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
975 return new yaml::AArch64FunctionInfo(*MFI);
976}
977
980 SMDiagnostic &Error, SMRange &SourceRange) const {
981 const auto &YamlMFI = static_cast<const yaml::AArch64FunctionInfo &>(MFI);
982 MachineFunction &MF = PFS.MF;
983 MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
984 return false;
985}
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden, cl::desc("Enable the AArch64 branch target pass"), cl::init(true))
static cl::opt< bool > EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden, cl::desc("Enable SVE intrinsic opts"), cl::init(true))
static cl::opt< bool > EnableAArch64CopyPropagation("aarch64-enable-copy-propagation", cl::desc("Enable the copy propagation with AArch64 copy instr"), cl::init(true), cl::Hidden)
static cl::opt< bool > BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true), cl::desc("Relax out of range conditional branches"))
static cl::opt< bool > EnablePromoteConstant("aarch64-enable-promote-const", cl::desc("Enable the promote constant pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableCondBrTuning("aarch64-enable-cond-br-tune", cl::desc("Enable the conditional branch tuning pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSinkFold("aarch64-enable-sink-fold", cl::desc("Enable sinking and folding of instruction copies"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden, cl::desc("Enable the pass that removes dead" " definitions and replaces stores to" " them with stores to the zero" " register"), cl::init(true))
static cl::opt< bool > EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden, cl::desc("Enable optimizations on complex GEPs"), cl::init(false))
static cl::opt< bool > EnableSelectOpt("aarch64-select-opt", cl::Hidden, cl::desc("Enable select to branch optimizations"), cl::init(true))
static cl::opt< bool > EnableLoadStoreOpt("aarch64-enable-ldst-opt", cl::desc("Enable the load/store pair" " optimization pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableGISelLoadStoreOptPostLegal("aarch64-enable-gisel-ldst-postlegal", cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"), cl::init(false), cl::Hidden)
static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU)
static cl::opt< unsigned > SVEVectorBitsMinOpt("aarch64-sve-vector-bits-min", cl::desc("Assume SVE vector registers are at least this big, " "with zero meaning no minimum size is assumed."), cl::init(0), cl::Hidden)
static cl::opt< bool > EnableMCR("aarch64-enable-mcr", cl::desc("Enable the machine combiner pass"), cl::init(true), cl::Hidden)
static cl::opt< cl::boolOrDefault > EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden, cl::desc("Enable the global merge pass"))
static cl::opt< bool > EnableStPairSuppress("aarch64-enable-stp-suppress", cl::desc("Suppress STP for AArch64"), cl::init(true), cl::Hidden)
static CodeModel::Model getEffectiveAArch64CodeModel(const Triple &TT, std::optional< CodeModel::Model > CM, bool JIT)
static cl::opt< bool > EnableCondOpt("aarch64-enable-condopt", cl::desc("Enable the condition optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > ForceStreaming("force-streaming", cl::desc("Force the use of streaming code for all functions"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableCollectLOH("aarch64-enable-collect-loh", cl::desc("Enable the pass that emits the linker optimization hints (LOH)"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableGISelLoadStoreOptPreLegal("aarch64-enable-gisel-ldst-prelegal", cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRedundantCopyElimination("aarch64-enable-copyelim", cl::desc("Enable the redundant copy elimination pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableAtomicTidy("aarch64-enable-atomic-cfg-tidy", cl::Hidden, cl::desc("Run SimplifyCFG after expanding atomic operations" " to make use of cmpxchg flow-based information"), cl::init(true))
static cl::opt< bool > EnableAdvSIMDScalar("aarch64-enable-simd-scalar", cl::desc("Enable use of AdvSIMD scalar integer instructions"), cl::init(false), cl::Hidden)
static cl::opt< int > EnableGlobalISelAtO("aarch64-enable-global-isel-at-O", cl::Hidden, cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"), cl::init(0))
static cl::opt< bool > EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden, cl::desc("Enable the loop data prefetch pass"), cl::init(true))
static cl::opt< bool > EnableSMEPeepholeOpt("enable-aarch64-sme-peephole-opt", cl::init(true), cl::Hidden, cl::desc("Perform SME peephole optimization"))
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
static cl::opt< bool > EnableSRLTSubregToRegMitigation("aarch64-srlt-mitigate-sr2r", cl::desc("Enable SUBREG_TO_REG mitigation by adding 'implicit-def' for " "super-regs when using Subreg Liveness Tracking"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMachinePipeliner("aarch64-enable-pipeliner", cl::desc("Enable Machine Pipeliner for AArch64"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix", cl::init(true), cl::Hidden)
static cl::opt< unsigned > SVEVectorBitsMaxOpt("aarch64-sve-vector-bits-max", cl::desc("Assume SVE vector registers are at most this big, " "with zero meaning no maximum size is assumed."), cl::init(0), cl::Hidden)
static cl::opt< bool > ForceStreamingCompatible("force-streaming-compatible", cl::desc("Force the use of streaming-compatible code for all functions"), cl::init(false), cl::Hidden)
static std::unique_ptr< TargetLoweringObjectFile > createTLOF(const Triple &TT)
static cl::opt< bool > EnableCompressJumpTables("aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true), cl::desc("Use smallest entry possible for jump tables"))
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target()
static cl::opt< bool > EnableCCMP("aarch64-enable-ccmp", cl::desc("Enable the CCMP formation pass"), cl::init(true), cl::Hidden)
This file a TargetTransformInfoImplBase conforming object specific to the AArch64 target machine.
static Reloc::Model getEffectiveRelocModel()
This file contains the simple types necessary to represent the attributes associated with functions a...
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
Provides analysis for continuously CSEing during GISel passes.
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
DXIL Legalizer
static cl::opt< bool > EnableGlobalMerge("enable-global-merge", cl::Hidden, cl::desc("Enable the global merge pass"), cl::init(true))
IRTranslator LLVM IR MI
This file declares the IRTranslator pass.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
This file contains some functions that are useful when dealing with strings.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
Target-Independent Code Generator Pass Configuration Options pass.
This pass exposes codegen information to IR-level passes.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
size_t clearLinkerOptimizationHints(const SmallPtrSetImpl< MachineInstr * > &MIs)
size_t clearLinkerOptimizationHints(const SmallPtrSetImpl< MachineInstr * > &MIs) const override
Remove all Linker Optimization Hints (LOH) associated with instructions in MIs and.
StringMap< std::unique_ptr< AArch64Subtarget > > SubtargetMap
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
const AArch64Subtarget * getSubtargetImpl() const =delete
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override
Similar to createMachineScheduler but used when postRA machine scheduling is enabled.
unsigned getEnableGlobalISelAtO() const
Returns the optimisation level that enables GlobalISel.
std::unique_ptr< TargetLoweringObjectFile > TLOF
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
void reset() override
Reset internal state.
AArch64TargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT, bool IsLittleEndian)
Create an AArch64 architecture model.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Return a TargetTransformInfo for a given function.
AArch64beTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
AArch64leTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
CodeGenTargetMachineImpl(const Target &T, StringRef DataLayoutString, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOptLevel OL)
This class represents a range of values.
LLVM_ABI APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
This pass is responsible for selecting generic machine instructions to target-specific instructions.
static void setUseExtended(bool Enable)
This pass implements the localization mechanism described at the top of this file.
Definition Localizer.h:43
Pass to replace calls to ifuncs with indirect calls.
Definition LowerIFunc.h:19
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
static LLVM_ABI const OptimizationLevel O0
Disable as many optimizations as possible.
This class provides access to building LLVM's passes.
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
This pass implements the reg bank selector pass used in the GlobalISel pipeline.
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition SourceMgr.h:298
Represents a range in source code.
Definition SMLoc.h:47
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
iterator begin() const
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
void setSupportsDebugEntryValues(bool Enable)
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
const Triple & getTargetTriple() const
void setMachineOutliner(bool Enable)
void setCFIFixup(bool Enable)
void setSupportsDefaultOutlining(bool Enable)
void setGlobalISelAbort(GlobalISelAbortMode Mode)
const MCAsmInfo & getMCAsmInfo() const
Return target specific asm information.
std::unique_ptr< const MCSubtargetInfo > STI
void setGlobalISel(bool Enable)
TargetOptions Options
CodeModel::Model getCodeModel() const
Returns the code model.
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
unsigned TLSSize
Bit size of immediate TLS offsets (0 == use the default).
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
Target-Independent Code Generator Pass Configuration Options.
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
virtual bool addRegAssignAndRewriteOptimized()
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
Interfaces for registering analysis passes, producing common pass manager configurations,...
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ DynamicNoPIC
Definition CodeGen.h:25
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
ScheduleDAGMILive * createSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
FunctionPass * createAArch64PreLegalizerCombiner()
void initializeLDTLSCleanupPass(PassRegistry &)
LLVM_ABI FunctionPass * createCFGSimplificationPass(SimplifyCFGOptions Options=SimplifyCFGOptions(), std::function< bool(const Function &)> Ftor=nullptr)
void initializeMachineSMEABIPass(PassRegistry &)
FunctionPass * createAArch64PostSelectOptimize()
FunctionPass * createAArch64ConditionOptimizerLegacyPass()
void initializeAArch64A53Fix835769LegacyPass(PassRegistry &)
LLVM_ABI ModulePass * createJMCInstrumenterPass()
JMC instrument pass.
void initializeAArch64SRLTDefineSuperRegsPass(PassRegistry &)
void initializeAArch64SpeculationHardeningPass(PassRegistry &)
FunctionPass * createAArch64RedundantCopyEliminationPass()
LLVM_ABI FunctionPass * createTypePromotionLegacyPass()
Create IR Type Promotion pass.
void initializeAArch64PostLegalizerCombinerPass(PassRegistry &)
void initializeAArch64StackTaggingPreRALegacyPass(PassRegistry &)
FunctionPass * createMachineSMEABIPass(CodeGenOptLevel)
LLVM_ABI FunctionPass * createSelectOptimizePass()
This pass converts conditional moves to conditional jumps when profitable.
FunctionPass * createAArch64A53Fix835769LegacyPass()
LLVM_ABI Pass * createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset, bool OnlyOptimizeForSize=false, bool MergeExternalByDefault=false, bool MergeConstantByDefault=false, bool MergeConstAggressiveByDefault=false)
GlobalMerge - This pass merges internal (by default) globals into structs to enable reuse of a base p...
void initializeAArch64BranchTargetsLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createPseudoProbeInserter()
This pass inserts pseudo probe annotation for callsite profiling.
FunctionPass * createAArch64PostCoalescerPass()
void initializeAArch64PromoteConstantPass(PassRegistry &)
FunctionPass * createFalkorMarkStridedAccessesPass()
Target & getTheAArch64beTarget()
FunctionPass * createAArch64PointerAuthPass()
FunctionPass * createFalkorHWPFFixPass()
FunctionPass * createAArch64SRLTDefineSuperRegsPass()
LLVM_ABI char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
std::string utostr(uint64_t X, bool isNeg=false)
FunctionPass * createAArch64O0PreLegalizerCombiner()
FunctionPass * createAArch64SLSHardeningLegacyPass()
void initializeAArch64CollectLOHLegacyPass(PassRegistry &)
FunctionPass * createAArch64LoadStoreOptLegacyPass()
createAArch64LoadStoreOptimizationPass - returns an instance of the load / store optimization pass.
FunctionPass * createAArch64CondBrTuning()
LLVM_ABI std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition CSEInfo.cpp:85
void initializeAArch64Arm64ECCallLoweringPass(PassRegistry &)
void initializeAArch64SIMDInstrOptLegacyPass(PassRegistry &)
LLVM_ABI char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
LLVM_ABI char & PeepholeOptimizerLegacyID
PeepholeOptimizer - This pass performs peephole optimizations - like extension and comparison elimina...
LLVM_ABI Pass * createLICMPass()
Definition LICM.cpp:386
FunctionPass * createAArch64A57FPLoadBalancingLegacyPass()
Target & getTheAArch64leTarget()
FunctionPass * createAArch64DeadRegisterDefinitions()
LLVM_ABI char & EarlyIfConverterLegacyID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
void initializeAArch64RedundantCondBranchLegacyPass(PassRegistry &)
void initializeAArch64PostSelectOptimizeLegacyPass(PassRegistry &)
FunctionPass * createSMEPeepholeOptPass()
FunctionPass * createAArch64PostLegalizerLowering()
ThinOrFullLTOPhase
This enumerates the LLVM full LTO or ThinLTO optimization phases.
Definition Pass.h:77
FunctionPass * createAArch64StackTaggingPreRALegacyPass()
void initializeAArch64CodeLayoutOptPass(PassRegistry &)
LLVM_ABI void initializeMachineKCFILegacyPass(PassRegistry &)
PassManager< Loop, LoopAnalysisManager, LoopStandardAnalysisResults &, LPMUpdater & > LoopPassManager
The Loop pass manager.
LLVM_ABI char & MachineCombinerID
This pass performs instruction combining using trace metrics to estimate critical-path and resource d...
void initializeAArch64AsmPrinterPass(PassRegistry &)
FunctionPass * createAArch64MIPeepholeOptLegacyPass()
LLVM_ABI FunctionPass * createUnpackMachineBundlesLegacy(std::function< bool(const MachineFunction &)> Ftor)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
void initializeAArch64AdvSIMDScalarLegacyPass(PassRegistry &)
FunctionPass * createAArch64CompressJumpTablesPass()
Target & getTheAArch64_32Target()
FunctionPass * createAArch64ConditionalCompares()
FunctionPass * createAArch64ExpandPseudoLegacyPass()
Returns an instance of the pseudo instruction expansion pass.
void initializeAArch64PointerAuthLegacyPass(PassRegistry &)
ScheduleDAGMI * createSchedPostRA(MachineSchedContext *C)
Create a generic scheduler with no vreg liveness or DAG mutation passes.
LLVM_ABI char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
void initializeFalkorMarkStridedAccessesLegacyPass(PassRegistry &)
void initializeAArch64StackTaggingPass(PassRegistry &)
void initializeAArch64PostLegalizerLoweringLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createKCFIPass()
Lowers KCFI operand bundles for indirect calls.
Definition KCFI.cpp:69
std::unique_ptr< ScheduleDAGMutation > createAArch64MacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAArch64MacroFusionDAGMutation()); to AArch64TargetMa...
LLVM_ABI FunctionPass * createComplexDeinterleavingPass(const TargetMachine *TM)
This pass implements generation of target-specific intrinsics to support handling of complex number a...
PassManager< Module > ModulePassManager
Convenience typedef for a pass manager over modules.
ModulePass * createAArch64Arm64ECCallLoweringPass()
void initializeAArch64ConditionOptimizerLegacyPass(PassRegistry &)
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
LLVM_ABI FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAArch64SIMDInstrOptPass()
Returns an instance of the high cost ASIMD instruction replacement optimization pass.
void initializeSMEPeepholeOptPass(PassRegistry &)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
FunctionPass * createAArch64StorePairSuppressPass()
void initializeAArch64PostCoalescerLegacyPass(PassRegistry &)
ModulePass * createSVEIntrinsicOptsPass()
FunctionPass * createAArch64CollectLOHPass()
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os, -Oz
Definition CodeGen.h:85
LLVM_ABI FunctionPass * createCFGuardLongjmpPass()
Creates CFGuard longjmp target identification pass.
void initializeAArch64SLSHardeningLegacyPass(PassRegistry &)
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
FunctionPass * createAArch64PostLegalizerCombiner(bool IsOptNone)
void initializeAArch64StorePairSuppressPass(PassRegistry &)
void initializeAArch64LowerHomogeneousPrologEpilogPass(PassRegistry &)
LLVM_ABI FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
LLVM_ABI FunctionPass * createInterleavedAccessPass()
InterleavedAccess Pass - This pass identifies and matches interleaved memory accesses to target speci...
LLVM_ABI void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
void initializeAArch64PreLegalizerCombinerLegacyPass(PassRegistry &)
FunctionPass * createAArch64ISelDag(AArch64TargetMachine &TM, CodeGenOptLevel OptLevel)
createAArch64ISelDag - This pass converts a legalized DAG into a AArch64-specific DAG,...
LLVM_ABI FunctionPass * createCFGuardPass()
Insert Control Flow Guard checks on indirect function calls.
Definition CFGuard.cpp:316
void initializeAArch64CondBrTuningPass(PassRegistry &)
LLVM_ABI char & MachinePipelinerID
This pass performs software pipelining on machine instructions.
void initializeAArch64A57FPLoadBalancingLegacyPass(PassRegistry &)
FunctionPass * createAArch64BranchTargetsPass()
Target & getTheARM64Target()
LLVM_ABI std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
void initializeFalkorHWPFFixPass(PassRegistry &)
void initializeAArch64ExpandPseudoLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createEHContGuardTargetsPass()
Creates Windows EH Continuation Guard target identification pass.
ModulePass * createAArch64LowerHomogeneousPrologEpilogPass()
FunctionPass * createAArch64StackTaggingPass(bool IsOptNone)
LLVM_ABI FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replace atomic instructions with __atomic_* library calls,...
FunctionPass * createAArch64CleanupLocalDynamicTLSPass()
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
ModulePass * createAArch64PromoteConstantPass()
void initializeAArch64CompressJumpTablesLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
LLVM_ABI MachineFunctionPass * createMachineCopyPropagationPass(bool UseCopyInstr)
void initializeAArch64RedundantCopyEliminationLegacyPass(PassRegistry &)
FunctionPass * createAArch64CodeLayoutOptPass()
FunctionPass * createAArch64AdvSIMDScalar()
FunctionPass * createAArch64RedundantCondBranchPass()
void initializeAArch64DAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createAArch64SpeculationHardeningPass()
Returns an instance of the pseudo instruction expansion pass.
void initializeSVEIntrinsicOptsPass(PassRegistry &)
void initializeAArch64MIPeepholeOptLegacyPass(PassRegistry &)
void initializeAArch64DeadRegisterDefinitionsLegacyPass(PassRegistry &)
void initializeAArch64ConditionalComparesLegacyPass(PassRegistry &)
void initializeAArch64O0PreLegalizerCombinerLegacyPass(PassRegistry &)
LLVM_ABI FunctionPass * createInterleavedLoadCombinePass()
InterleavedLoadCombines Pass - This pass identifies interleaved loads and combines them into wide loa...
void initializeAArch64LoadStoreOptLegacyPass(PassRegistry &)
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
std::optional< bool > EnableGlobalISelOption
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
static FuncInfoTy * create(BumpPtrAllocator &Allocator, const Function &F, const SubtargetTy *STI)
Factory function: default behavior is to call new using the supplied allocator.
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.