PassBuilderPipelines.cpp
1//===- Construction of pass pipelines -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file provides the implementation of the PassBuilder based on our
11/// static pass registry as well as related functionality. It also provides
12/// helpers to aid in analyzing, debugging, and testing passes and pass
13/// pipelines.
14///
15//===----------------------------------------------------------------------===//
16
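// Illustrative sketch (an assumption about typical usage, not part of this
// file): how a client might consume the pipelines constructed below through
// the public PassBuilder API. Kept inside #if 0 since the analysis-manager
// setup belongs to the client, not to this file.
#if 0
#include "llvm/IR/Module.h"
#include "llvm/Passes/PassBuilder.h"
using namespace llvm;

void runDefaultO2(Module &M) {
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PassBuilder PB;
  // Register the standard analyses and cross-register the proxies.
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
  // Build and run the default -O2 module pipeline implemented in this file.
  ModulePassManager MPM =
      PB.buildPerModuleDefaultPipeline(OptimizationLevel::O2);
  MPM.run(M, MAM);
}
#endif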
17#include "llvm/ADT/Statistic.h"
27#include "llvm/IR/PassManager.h"
28#include "llvm/Pass.h"
150
151using namespace llvm;
152
153namespace llvm {
154
156 "enable-ml-inliner", cl::init(InliningAdvisorMode::Default), cl::Hidden,
157 cl::desc("Enable ML policy for inliner. Currently trained for -Oz only"),
159 "Heuristics-based inliner version"),
161 "Use development mode (runtime-loadable model)"),
163 "Use release mode (AOT-compiled model)")));
164
165/// Flag to enable inline deferral during PGO.
166static cl::opt<bool>
167 EnablePGOInlineDeferral("enable-npm-pgo-inline-deferral", cl::init(true),
169 cl::desc("Enable inline deferral during PGO"));
170
171static cl::opt<bool> EnableModuleInliner("enable-module-inliner",
172 cl::init(false), cl::Hidden,
173 cl::desc("Enable module inliner"));
174
176 "mandatory-inlining-first", cl::init(false), cl::Hidden,
177 cl::desc("Perform mandatory inlinings module-wide, before performing "
178 "inlining"));
179
181 "eagerly-invalidate-analyses", cl::init(true), cl::Hidden,
182 cl::desc("Eagerly invalidate more analyses in default pipelines"));
183
185 "enable-merge-functions", cl::init(false), cl::Hidden,
186 cl::desc("Enable function merging as part of the optimization pipeline"));
187
189 "enable-post-pgo-loop-rotation", cl::init(true), cl::Hidden,
190 cl::desc("Run the loop rotation transformation after PGO instrumentation"));
191
193 "enable-global-analyses", cl::init(true), cl::Hidden,
194 cl::desc("Enable inter-procedural analyses"));
195
196static cl::opt<bool> RunPartialInlining("enable-partial-inlining",
197 cl::init(false), cl::Hidden,
198 cl::desc("Run Partial inlining pass"));
199
201 "extra-vectorizer-passes", cl::init(false), cl::Hidden,
202 cl::desc("Run cleanup optimization passes after vectorization"));
203
204static cl::opt<bool> RunNewGVN("enable-newgvn", cl::init(false), cl::Hidden,
205 cl::desc("Run the NewGVN pass"));
206
207static cl::opt<bool>
208 EnableLoopInterchange("enable-loopinterchange", cl::init(false), cl::Hidden,
209 cl::desc("Enable the LoopInterchange Pass"));
210
211static cl::opt<bool> EnableUnrollAndJam("enable-unroll-and-jam",
212 cl::init(false), cl::Hidden,
213 cl::desc("Enable Unroll And Jam Pass"));
214
215static cl::opt<bool> EnableLoopFlatten("enable-loop-flatten", cl::init(false),
217 cl::desc("Enable the LoopFlatten Pass"));
218
219// Experimentally allow loop header duplication. This should allow for better
220// optimization at Oz, since loop-idiom recognition can then recognize things
221// like memcpy. If this ends up being useful for many targets, we should drop
222// this flag and make a code generation option that can be controlled
223// independent of the opt level and exposed through the frontend.
225 "enable-loop-header-duplication", cl::init(false), cl::Hidden,
226 cl::desc("Enable loop header duplication at any optimization level"));
227
228static cl::opt<bool>
229 EnableDFAJumpThreading("enable-dfa-jump-thread",
230 cl::desc("Enable DFA jump threading"),
231 cl::init(false), cl::Hidden);
232
233static cl::opt<bool>
234 EnableHotColdSplit("hot-cold-split",
235 cl::desc("Enable hot-cold splitting pass"));
236
237static cl::opt<bool> EnableIROutliner("ir-outliner", cl::init(false),
239 cl::desc("Enable ir outliner pass"));
240
241static cl::opt<bool>
242 DisablePreInliner("disable-preinline", cl::init(false), cl::Hidden,
243 cl::desc("Disable pre-instrumentation inliner"));
244
246 "preinline-threshold", cl::Hidden, cl::init(75),
247 cl::desc("Control the amount of inlining in pre-instrumentation inliner "
248 "(default = 75)"));
249
250static cl::opt<bool>
251 EnableGVNHoist("enable-gvn-hoist",
252 cl::desc("Enable the GVN hoisting pass (default = off)"));
253
254static cl::opt<bool>
255 EnableGVNSink("enable-gvn-sink",
256 cl::desc("Enable the GVN sinking pass (default = off)"));
257
259 "enable-jump-table-to-switch",
260 cl::desc("Enable JumpTableToSwitch pass (default = off)"));
261
262 // This option is used to simplify testing of SampleFDO optimizations for
263 // profile loading.
264static cl::opt<bool>
265 EnableCHR("enable-chr", cl::init(true), cl::Hidden,
266 cl::desc("Enable control height reduction optimization (CHR)"));
267
269 "flattened-profile-used", cl::init(false), cl::Hidden,
270 cl::desc("Indicate the sample profile being used is flattened, i.e., "
271 "no inline hierarchy exists in the profile"));
272
273static cl::opt<bool>
274 EnableMatrix("enable-matrix", cl::init(false), cl::Hidden,
275 cl::desc("Enable lowering of the matrix intrinsics"));
276
278 "enable-constraint-elimination", cl::init(true), cl::Hidden,
279 cl::desc(
280 "Enable pass to eliminate conditions based on linear constraints"));
281
283 "attributor-enable", cl::Hidden, cl::init(AttributorRunOption::NONE),
284 cl::desc("Enable the attributor inter-procedural deduction pass"),
286 "enable all attributor runs"),
288 "enable module-wide attributor runs"),
290 "enable call graph SCC attributor runs"),
291 clEnumValN(AttributorRunOption::NONE, "none",
292 "disable attributor runs")));
293
295 "enable-sampled-instrumentation", cl::init(false), cl::Hidden,
296 cl::desc("Enable profile instrumentation sampling (default = off)"));
298 "enable-loop-versioning-licm", cl::init(false), cl::Hidden,
299 cl::desc("Enable the experimental Loop Versioning LICM pass"));
300
302 "instrument-cold-function-only-path", cl::init(""),
303 cl::desc("File path for cold function only instrumentation(requires use "
304 "with --pgo-instrument-cold-function-only)"),
305 cl::Hidden);
306
309
311} // namespace llvm
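// Illustrative usage: the cl::opt knobs declared above are consumed when the
// default pipelines below are built, so they can be toggled from any tool
// that constructs these pipelines, for example:
//   opt -passes='default<O2>' -enable-matrix -enable-newgvn \
//       -enable-ml-inliner=release in.ll -S -o out.ll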
312
329
330namespace llvm {
332} // namespace llvm
333
335 OptimizationLevel Level) {
336 for (auto &C : PeepholeEPCallbacks)
337 C(FPM, Level);
338}
341 for (auto &C : LateLoopOptimizationsEPCallbacks)
342 C(LPM, Level);
343}
345 OptimizationLevel Level) {
346 for (auto &C : LoopOptimizerEndEPCallbacks)
347 C(LPM, Level);
348}
351 for (auto &C : ScalarOptimizerLateEPCallbacks)
352 C(FPM, Level);
353}
355 OptimizationLevel Level) {
356 for (auto &C : CGSCCOptimizerLateEPCallbacks)
357 C(CGPM, Level);
358}
360 OptimizationLevel Level) {
361 for (auto &C : VectorizerStartEPCallbacks)
362 C(FPM, Level);
363}
365 OptimizationLevel Level) {
366 for (auto &C : VectorizerEndEPCallbacks)
367 C(FPM, Level);
368}
370 OptimizationLevel Level,
372 for (auto &C : OptimizerEarlyEPCallbacks)
373 C(MPM, Level, Phase);
374}
376 OptimizationLevel Level,
378 for (auto &C : OptimizerLastEPCallbacks)
379 C(MPM, Level, Phase);
380}
383 for (auto &C : FullLinkTimeOptimizationEarlyEPCallbacks)
384 C(MPM, Level);
385}
388 for (auto &C : FullLinkTimeOptimizationLastEPCallbacks)
389 C(MPM, Level);
390}
392 OptimizationLevel Level) {
393 for (auto &C : PipelineStartEPCallbacks)
394 C(MPM, Level);
395}
398 for (auto &C : PipelineEarlySimplificationEPCallbacks)
399 C(MPM, Level, Phase);
400}
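// Illustrative sketch (assumption about typical out-of-tree usage, not part
// of this file): the invoke*EPCallbacks helpers above fire extension-point
// callbacks that plugins register through the public PassBuilder API.
// MyPeepholePass and MyModulePass are hypothetical placeholders.
#if 0
  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        FPM.addPass(MyPeepholePass()); // hypothetical function pass
      });
  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &MPM, OptimizationLevel Level) {
        MPM.addPass(MyModulePass()); // hypothetical module pass
      });
#endif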
401
402 // Helper to add AnnotationRemarksPass.
403 static void addAnnotationRemarksPass(ModulePassManager &MPM) {
404 MPM.addPass(createModuleToFunctionPassAdaptor(AnnotationRemarksPass()));
405 }
406 
407 // Helper to check if the current compilation phase is preparing for LTO
408 static bool isLTOPreLink(ThinOrFullLTOPhase Phase) {
409 return Phase == ThinOrFullLTOPhase::ThinLTOPreLink ||
410 Phase == ThinOrFullLTOPhase::FullLTOPreLink;
411 }
412 
413 // Helper to check if the current compilation phase is LTO backend
414 static bool isLTOPostLink(ThinOrFullLTOPhase Phase) {
415 return Phase == ThinOrFullLTOPhase::ThinLTOPostLink ||
416 Phase == ThinOrFullLTOPhase::FullLTOPostLink;
417 }
418 
419 // Helper to conditionally wrap the Coro passes.
420 static CoroConditionalWrapper buildCoroWrapper(ThinOrFullLTOPhase Phase) {
421 // TODO: Skip passes according to Phase.
422 ModulePassManager CoroPM;
423 CoroPM.addPass(CoroEarlyPass());
424 CGSCCPassManager CGPM;
425 CGPM.addPass(CoroSplitPass());
426 CoroPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
427 CoroPM.addPass(CoroCleanupPass());
428 CoroPM.addPass(GlobalDCEPass());
429 return CoroConditionalWrapper(std::move(CoroPM));
430}
431
432// TODO: Investigate the cost/benefit of tail call elimination on debugging.
434PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
436
438
440 FPM.addPass(CountVisitsPass());
441
442 // Form SSA out of local memory accesses after breaking apart aggregates into
443 // scalars.
444 FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
445
446 // Catch trivial redundancies
447 FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
448
449 // Hoisting of scalars and load expressions.
450 FPM.addPass(
451 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
452 FPM.addPass(InstCombinePass());
453
454 FPM.addPass(LibCallsShrinkWrapPass());
455
456 invokePeepholeEPCallbacks(FPM, Level);
457
458 FPM.addPass(
459 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
460
461 // Form canonically associated expression trees, and simplify the trees using
462 // basic mathematical properties. For example, this will form (nearly)
463 // minimal multiplication trees.
464 FPM.addPass(ReassociatePass());
465
466 // Add the primary loop simplification pipeline.
467 // FIXME: Currently this is split into two loop pass pipelines because we run
468 // some function passes in between them. These can and should be removed
469 // and/or replaced by scheduling the loop pass equivalents in the correct
470 // positions. But those equivalent passes aren't powerful enough yet.
471 // Specifically, `SimplifyCFGPass` and `InstCombinePass` are currently still
472 // used. We have `LoopSimplifyCFGPass` which isn't yet powerful enough to
473 // fully replace `SimplifyCFGPass`, and the closest to the other we have is
474 // `LoopInstSimplify`.
475 LoopPassManager LPM1, LPM2;
476
477 // Simplify the loop body. We do this initially to clean up after other loop
478 // passes run, either when iterating on a loop or on inner loops with
479 // implications on the outer loop.
480 LPM1.addPass(LoopInstSimplifyPass());
481 LPM1.addPass(LoopSimplifyCFGPass());
482
483 // Try to remove as much code from the loop header as possible,
484 // to reduce the amount of IR that will have to be duplicated. However,
485 // do not perform speculative hoisting the first time as LICM
486 // will destroy metadata that may not need to be destroyed if run
487 // after loop rotation.
488 // TODO: Investigate promotion cap for O1.
489 LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
490 /*AllowSpeculation=*/false));
491
492 LPM1.addPass(LoopRotatePass(/* Disable header duplication */ true,
494 // TODO: Investigate promotion cap for O1.
495 LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
496 /*AllowSpeculation=*/true));
497 LPM1.addPass(SimpleLoopUnswitchPass());
499 LPM1.addPass(LoopFlattenPass());
500
501 LPM2.addPass(LoopIdiomRecognizePass());
502 LPM2.addPass(IndVarSimplifyPass());
503
505
506 LPM2.addPass(LoopDeletionPass());
507
508 // Do not enable unrolling in PreLinkThinLTO phase during sample PGO
509 // because it changes the IR in ways that make profile annotation in the
510 // backend compilation inaccurate. The normal unroller doesn't pay attention
511 // to forced full unroll attributes, so we need to make sure to allow the
512 // full unroll pass to pay attention to them.
513 if (Phase != ThinOrFullLTOPhase::ThinLTOPreLink || !PGOOpt ||
514 PGOOpt->Action != PGOOptions::SampleUse)
515 LPM2.addPass(LoopFullUnrollPass(Level.getSpeedupLevel(),
516 /* OnlyWhenForced= */ !PTO.LoopUnrolling,
517 PTO.ForgetAllSCEVInLoopUnroll));
518
520
521 FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM1),
522 /*UseMemorySSA=*/true,
523 /*UseBlockFrequencyInfo=*/true));
524 FPM.addPass(
525 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
526 FPM.addPass(InstCombinePass());
527 // The loop passes in LPM2 (LoopFullUnrollPass) do not preserve MemorySSA.
528 // *All* loop passes must preserve it, in order to be able to use it.
529 FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
530 /*UseMemorySSA=*/false,
531 /*UseBlockFrequencyInfo=*/false));
532
533 // Delete small array after loop unroll.
534 FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
535
536 // Specially optimize memory movement as it doesn't look like dataflow in SSA.
537 FPM.addPass(MemCpyOptPass());
538
539 // Sparse conditional constant propagation.
540 // FIXME: It isn't clear why we do this *after* loop passes rather than
541 // before...
542 FPM.addPass(SCCPPass());
543
544 // Delete dead bit computations (instcombine runs after to fold away the dead
545 // computations, and then ADCE will run later to exploit any new DCE
546 // opportunities that creates).
547 FPM.addPass(BDCEPass());
548
549 // Run instcombine after redundancy and dead bit elimination to exploit
550 // opportunities opened up by them.
551 FPM.addPass(InstCombinePass());
552 invokePeepholeEPCallbacks(FPM, Level);
553
554 FPM.addPass(CoroElidePass());
555
557
558 // Finally, do an expensive DCE pass to catch all the dead code exposed by
559 // the simplifications and basic cleanup after all the simplifications.
560 // TODO: Investigate if this is too expensive.
561 FPM.addPass(ADCEPass());
562 FPM.addPass(
563 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
564 FPM.addPass(InstCombinePass());
565 invokePeepholeEPCallbacks(FPM, Level);
566
567 return FPM;
568}
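// Illustrative note: the exact pass sequence produced by the O1 pipeline
// above can be inspected without running it, e.g.:
//   opt -passes='default<O1>' -print-pipeline-passes -disable-output in.ll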
569
573 assert(Level != OptimizationLevel::O0 && "Must request optimizations!");
574
575 // The O1 pipeline has a separate pipeline creation function to simplify
576 // construction readability.
577 if (Level.getSpeedupLevel() == 1)
578 return buildO1FunctionSimplificationPipeline(Level, Phase);
579
581
584
585 // Form SSA out of local memory accesses after breaking apart aggregates into
586 // scalars.
588
589 // Catch trivial redundancies
590 FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
593
594 // Hoisting of scalars and load expressions.
595 if (EnableGVNHoist)
596 FPM.addPass(GVNHoistPass());
597
598 // Global value numbering based sinking.
599 if (EnableGVNSink) {
600 FPM.addPass(GVNSinkPass());
601 FPM.addPass(
602 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
603 }
604
605 // Speculative execution if the target has divergent branches; otherwise nop.
606 FPM.addPass(SpeculativeExecutionPass(/* OnlyIfDivergentTarget =*/true));
607
608 // Optimize based on known information about branches, and cleanup afterward.
611
612 // Jump table to switch conversion.
617
618 FPM.addPass(
619 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
622
623 if (!Level.isOptimizingForSize())
625
626 invokePeepholeEPCallbacks(FPM, Level);
627
628 // For PGO use pipeline, try to optimize memory intrinsics such as memcpy
629 // using the size value profile. Don't perform this when optimizing for size.
630 if (PGOOpt && PGOOpt->Action == PGOOptions::IRUse &&
631 !Level.isOptimizingForSize())
633
634 FPM.addPass(TailCallElimPass(/*UpdateFunctionEntryCount=*/
635 isInstrumentedPGOUse()));
636 FPM.addPass(
637 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
638
639 // Form canonically associated expression trees, and simplify the trees using
640 // basic mathematical properties. For example, this will form (nearly)
641 // minimal multiplication trees.
643
646
647 // Add the primary loop simplification pipeline.
648 // FIXME: Currently this is split into two loop pass pipelines because we run
649 // some function passes in between them. These can and should be removed
650 // and/or replaced by scheduling the loop pass equivalents in the correct
651 // positions. But those equivalent passes aren't powerful enough yet.
652 // Specifically, `SimplifyCFGPass` and `InstCombinePass` are currently still
653 // used. We have `LoopSimplifyCFGPass` which isn't yet powerful enough to
654 // fully replace `SimplifyCFGPass`, and the closest to the other we have is
655 // `LoopInstSimplify`.
656 LoopPassManager LPM1, LPM2;
657
658 // Simplify the loop body. We do this initially to clean up after other loop
659 // passes run, either when iterating on a loop or on inner loops with
660 // implications on the outer loop.
661 LPM1.addPass(LoopInstSimplifyPass());
662 LPM1.addPass(LoopSimplifyCFGPass());
663
664 // Try to remove as much code from the loop header as possible,
665 // to reduce the amount of IR that will have to be duplicated. However,
666 // do not perform speculative hoisting the first time as LICM
667 // will destroy metadata that may not need to be destroyed if run
668 // after loop rotation.
669 // TODO: Investigate promotion cap for O1.
670 LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
671 /*AllowSpeculation=*/false));
672
673 // Disable header duplication in loop rotation at -Oz.
675 Level != OptimizationLevel::Oz,
677 // TODO: Investigate promotion cap for O1.
678 LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
679 /*AllowSpeculation=*/true));
680 LPM1.addPass(
681 SimpleLoopUnswitchPass(/* NonTrivial */ Level == OptimizationLevel::O3));
683 LPM1.addPass(LoopFlattenPass());
684
685 LPM2.addPass(LoopIdiomRecognizePass());
686 LPM2.addPass(IndVarSimplifyPass());
687
688 {
690 ExtraPasses.addPass(SimpleLoopUnswitchPass(/* NonTrivial */ Level ==
692 LPM2.addPass(std::move(ExtraPasses));
693 }
694
696
697 LPM2.addPass(LoopDeletionPass());
698
699 // Do not enable unrolling in PreLinkThinLTO phase during sample PGO
700 // because it changes the IR in ways that make profile annotation in the
701 // backend compilation inaccurate. The normal unroller doesn't pay attention
702 // to forced full unroll attributes, so we need to make sure to allow the
703 // full unroll pass to pay attention to them.
704 if (Phase != ThinOrFullLTOPhase::ThinLTOPreLink || !PGOOpt ||
705 PGOOpt->Action != PGOOptions::SampleUse)
706 LPM2.addPass(LoopFullUnrollPass(Level.getSpeedupLevel(),
707 /* OnlyWhenForced= */ !PTO.LoopUnrolling,
708 PTO.ForgetAllSCEVInLoopUnroll));
709
711
712 FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM1),
713 /*UseMemorySSA=*/true,
714 /*UseBlockFrequencyInfo=*/true));
715 FPM.addPass(
716 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
718 // The loop passes in LPM2 (LoopIdiomRecognizePass, IndVarSimplifyPass,
719 // LoopDeletionPass and LoopFullUnrollPass) do not preserve MemorySSA.
720 // *All* loop passes must preserve it, in order to be able to use it.
721 FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM2),
722 /*UseMemorySSA=*/false,
723 /*UseBlockFrequencyInfo=*/false));
724
725 // Delete small array after loop unroll.
727
728 // Try vectorization/scalarization transforms that are both improvements
729 // themselves and can allow further folds with GVN and InstCombine.
730 FPM.addPass(VectorCombinePass(/*TryEarlyFoldsOnly=*/true));
731
732 // Eliminate redundancies.
734 if (RunNewGVN)
735 FPM.addPass(NewGVNPass());
736 else
737 FPM.addPass(GVNPass());
738
739 // Sparse conditional constant propagation.
740 // FIXME: It isn't clear why we do this *after* loop passes rather than
741 // before...
742 FPM.addPass(SCCPPass());
743
744 // Delete dead bit computations (instcombine runs after to fold away the dead
745 // computations, and then ADCE will run later to exploit any new DCE
746 // opportunities that creates).
747 FPM.addPass(BDCEPass());
748
749 // Run instcombine after redundancy and dead bit elimination to exploit
750 // opportunities opened up by them.
752 invokePeepholeEPCallbacks(FPM, Level);
753
754 // Re-consider control flow based optimizations after redundancy elimination,
755 // redo DCE, etc.
758
761
762 // Finally, do an expensive DCE pass to catch all the dead code exposed by
763 // the simplifications and basic cleanup after all the simplifications.
764 // TODO: Investigate if this is too expensive.
765 FPM.addPass(ADCEPass());
766
767 // Specially optimize memory movement as it doesn't look like dataflow in SSA.
768 FPM.addPass(MemCpyOptPass());
769
770 FPM.addPass(DSEPass());
772
774 LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
775 /*AllowSpeculation=*/true),
776 /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
777
778 FPM.addPass(CoroElidePass());
779
781
783 .convertSwitchRangeToICmp(true)
784 .hoistCommonInsts(true)
785 .sinkCommonInsts(true)));
787 invokePeepholeEPCallbacks(FPM, Level);
788
789 return FPM;
790}
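// Illustrative note: several of the cl::opt flags declared at the top of this
// file reshape the O2/O3 function simplification pipeline above, e.g.:
//   opt -passes='default<O3>' -enable-gvn-hoist -enable-dfa-jump-thread \
//       -enable-loop-flatten in.ll -S -o out.ll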
791
792void PassBuilder::addRequiredLTOPreLinkPasses(ModulePassManager &MPM) {
795}
796
797void PassBuilder::addPreInlinerPasses(ModulePassManager &MPM,
798 OptimizationLevel Level,
799 ThinOrFullLTOPhase LTOPhase) {
800 assert(Level != OptimizationLevel::O0 && "Not expecting O0 here!");
802 return;
803 InlineParams IP;
804
806
807 // FIXME: The hint threshold has the same value used by the regular inliner
808 // when not optimizing for size. This should probably be lowered after
809 // performance testing.
810 // FIXME: this comment is cargo culted from the old pass manager, revisit.
811 IP.HintThreshold = Level.isOptimizingForSize() ? PreInlineThreshold : 325;
813 IP, /* MandatoryFirst */ true,
815 CGSCCPassManager &CGPipeline = MIWP.getPM();
816
818 FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
819 FPM.addPass(EarlyCSEPass()); // Catch trivial redundancies.
820 FPM.addPass(SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(
821 true))); // Merge & remove basic blocks.
822 FPM.addPass(InstCombinePass()); // Combine silly sequences.
823 invokePeepholeEPCallbacks(FPM, Level);
824
825 CGPipeline.addPass(createCGSCCToFunctionPassAdaptor(
826 std::move(FPM), PTO.EagerlyInvalidateAnalyses));
827
828 MPM.addPass(std::move(MIWP));
829
830 // Delete anything that is now dead to make sure that we don't instrument
831 // dead code. Instrumentation can end up keeping dead code around and
832 // dramatically increase code size.
833 MPM.addPass(GlobalDCEPass());
834}
835
836void PassBuilder::addPostPGOLoopRotation(ModulePassManager &MPM,
837 OptimizationLevel Level) {
839 // Disable header duplication in loop rotation at -Oz.
842 LoopRotatePass(EnableLoopHeaderDuplication ||
843 Level != OptimizationLevel::Oz),
844 /*UseMemorySSA=*/false,
845 /*UseBlockFrequencyInfo=*/false),
846 PTO.EagerlyInvalidateAnalyses));
847 }
848}
849
850void PassBuilder::addPGOInstrPasses(ModulePassManager &MPM,
851 OptimizationLevel Level, bool RunProfileGen,
852 bool IsCS, bool AtomicCounterUpdate,
853 std::string ProfileFile,
854 std::string ProfileRemappingFile) {
855 assert(Level != OptimizationLevel::O0 && "Not expecting O0 here!");
856
857 if (!RunProfileGen) {
858 assert(!ProfileFile.empty() && "Profile use expecting a profile file!");
859 MPM.addPass(
860 PGOInstrumentationUse(ProfileFile, ProfileRemappingFile, IsCS, FS));
861 // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
862 // RequireAnalysisPass for PSI before subsequent non-module passes.
863 MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
864 return;
865 }
866
867 // Perform PGO instrumentation.
868 MPM.addPass(PGOInstrumentationGen(IsCS ? PGOInstrumentationType::CSFDO
870
871 addPostPGOLoopRotation(MPM, Level);
872 // Add the profile lowering pass.
873 InstrProfOptions Options;
874 if (!ProfileFile.empty())
875 Options.InstrProfileOutput = ProfileFile;
876 // Do counter promotion at Level greater than O0.
877 Options.DoCounterPromotion = true;
878 Options.UseBFIInPromotion = IsCS;
879 if (EnableSampledInstr) {
880 Options.Sampling = true;
881 // With sampling, there is little benefit to enabling counter promotion.
882 // But note that sampling does work with counter promotion.
883 Options.DoCounterPromotion = false;
884 }
885 Options.Atomic = AtomicCounterUpdate;
886 MPM.addPass(InstrProfilingLoweringPass(Options, IsCS));
887}
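// Illustrative note: a typical IR-level instrumentation PGO cycle that
// exercises addPGOInstrPasses via the standard clang driver flags:
//   clang -O2 -fprofile-generate=prof/ main.c -o a.out
//   ./a.out
//   llvm-profdata merge -output=main.profdata prof/
//   clang -O2 -fprofile-use=main.profdata main.c -o a.out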
888
890 bool RunProfileGen, bool IsCS,
891 bool AtomicCounterUpdate,
892 std::string ProfileFile,
893 std::string ProfileRemappingFile) {
894 if (!RunProfileGen) {
895 assert(!ProfileFile.empty() && "Profile use expecting a profile file!");
896 MPM.addPass(
897 PGOInstrumentationUse(ProfileFile, ProfileRemappingFile, IsCS, FS));
898 // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
899 // RequireAnalysisPass for PSI before subsequent non-module passes.
901 return;
902 }
903
904 // Perform PGO instrumentation.
907 // Add the profile lowering pass.
909 if (!ProfileFile.empty())
910 Options.InstrProfileOutput = ProfileFile;
911 // Do not do counter promotion at O0.
912 Options.DoCounterPromotion = false;
913 Options.UseBFIInPromotion = IsCS;
914 Options.Atomic = AtomicCounterUpdate;
916}
917
919 return getInlineParams(Level.getSpeedupLevel(), Level.getSizeLevel());
920}
921
925 InlineParams IP;
926 if (PTO.InlinerThreshold == -1)
927 IP = getInlineParamsFromOptLevel(Level);
928 else
929 IP = getInlineParams(PTO.InlinerThreshold);
930 // For PreLinkThinLTO + SamplePGO or PreLinkFullLTO + SamplePGO,
931 // set hot-caller threshold to 0 to disable hot
932 // callsite inline (as much as possible [1]) because it makes
933 // profile annotation in the backend inaccurate.
934 //
935 // [1] Note the cost of a function could be below zero due to erased
936 // prologue / epilogue.
937 if (isLTOPreLink(Phase) && PGOOpt && PGOOpt->Action == PGOOptions::SampleUse)
938 IP.HotCallSiteThreshold = 0;
939 
940 if (PGOOpt)
942
946
947 // Require the GlobalsAA analysis for the module so we can query it within
948 // the CGSCC pipeline.
950 MIWP.addModulePass(RequireAnalysisPass<GlobalsAA, Module>());
951 // Invalidate AAManager so it can be recreated and pick up the newly
952 // available GlobalsAA.
953 MIWP.addModulePass(
955 }
956
957 // Require the ProfileSummaryAnalysis for the module so we can query it within
958 // the inliner pass.
960
961 // Now begin the main postorder CGSCC pipeline.
962 // FIXME: The current CGSCC pipeline has its origins in the legacy pass
963 // manager and trying to emulate its precise behavior. Much of this doesn't
964 // make a lot of sense and we should revisit the core CGSCC structure.
965 CGSCCPassManager &MainCGPipeline = MIWP.getPM();
966
967 // Note: historically, the PruneEH pass was run first to deduce nounwind and
968 // generally clean up exception handling overhead. It isn't clear this is
969 // valuable as the inliner doesn't currently care whether it is inlining an
970 // invoke or a call.
971
973 MainCGPipeline.addPass(AttributorCGSCCPass());
974
975 // Deduce function attributes. We do another run of this after the function
976 // simplification pipeline, so this only needs to run when it could affect the
977 // function simplification pipeline, which is only the case with recursive
978 // functions.
979 MainCGPipeline.addPass(PostOrderFunctionAttrsPass(/*SkipNonRecursive*/ true));
980
981 // When at O3 add argument promotion to the pass pipeline.
982 // FIXME: It isn't at all clear why this should be limited to O3.
983 if (Level == OptimizationLevel::O3)
984 MainCGPipeline.addPass(ArgumentPromotionPass());
985
986 // Try to perform OpenMP specific optimizations. This is a (quick!) no-op if
987 // there are no OpenMP runtime calls present in the module.
988 if (Level == OptimizationLevel::O2 || Level == OptimizationLevel::O3)
989 MainCGPipeline.addPass(OpenMPOptCGSCCPass(Phase));
990
991 invokeCGSCCOptimizerLateEPCallbacks(MainCGPipeline, Level);
992
993 // Add the core function simplification pipeline nested inside the
994 // CGSCC walk.
997 PTO.EagerlyInvalidateAnalyses, /*NoRerun=*/true));
998
999 // Finally, deduce any function attributes based on the fully simplified
1000 // function.
1001 MainCGPipeline.addPass(PostOrderFunctionAttrsPass());
1002
1003 // Mark that the function is fully simplified and that it shouldn't be
1004 // simplified again if we somehow revisit it due to CGSCC mutations unless
1005 // it's been modified since.
1008
1010 MainCGPipeline.addPass(CoroSplitPass(Level != OptimizationLevel::O0));
1011 MainCGPipeline.addPass(CoroAnnotationElidePass());
1012 }
1013
1014 // Make sure we don't affect potential future NoRerun CGSCC adaptors.
1015 MIWP.addLateModulePass(createModuleToFunctionPassAdaptor(
1017
1018 return MIWP;
1019}
1020
1025
1027 // For PreLinkThinLTO + SamplePGO or PreLinkFullLTO + SamplePGO,
1028 // set hot-caller threshold to 0 to disable hot
1029 // callsite inline (as much as possible [1]) because it makes
1030 // profile annotation in the backend inaccurate.
1031 //
1032 // [1] Note the cost of a function could be below zero due to erased
1033 // prologue / epilogue.
1034 if (isLTOPreLink(Phase) && PGOOpt && PGOOpt->Action == PGOOptions::SampleUse)
1035 IP.HotCallSiteThreshold = 0;
1036
1037 if (PGOOpt)
1039
1040 // The inline deferral logic is used to avoid losing some
1041 // inlining chances in the future. It is helpful in the SCC inliner, in which
1042 // inlining is processed in bottom-up order.
1043 // In the module inliner, by contrast, the inlining order is priority-based
1044 // by default, so the inline deferral is unnecessary there and we disable
1045 // the inline deferral logic in the module inliner.
1046 IP.EnableDeferral = false;
1047
1050 MPM.addPass(GlobalOptPass());
1051 MPM.addPass(GlobalDCEPass());
1052 MPM.addPass(PGOCtxProfFlatteningPass(/*IsPreThinlink=*/false));
1053 }
1054
1057 PTO.EagerlyInvalidateAnalyses));
1058
1062 MPM.addPass(
1064 }
1065
1066 return MPM;
1067}
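// Illustrative note: buildModuleInlinerPipeline above is selected instead of
// the CGSCC inliner wrapper when the -enable-module-inliner flag declared
// near the top of this file is set, e.g.:
//   opt -passes='default<O2>' -enable-module-inliner in.ll -S -o out.ll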
1068
1072 assert(Level != OptimizationLevel::O0 &&
1073 "Should not be used for O0 pipeline");
1074
1076 "FullLTOPostLink shouldn't call buildModuleSimplificationPipeline!");
1077
1079
1080 // Place pseudo probe instrumentation as the first pass of the pipeline to
1081 // minimize the impact of optimization changes.
1082 if (PGOOpt && PGOOpt->PseudoProbeForProfiling &&
1085
1086 bool HasSampleProfile = PGOOpt && (PGOOpt->Action == PGOOptions::SampleUse);
1087
1088 // In ThinLTO mode, when flattened profile is used, all the available
1089 // profile information will be annotated in PreLink phase so there is
1090 // no need to load the profile again in PostLink.
1091 bool LoadSampleProfile =
1092 HasSampleProfile &&
1094
1095 // During the ThinLTO backend phase we perform early indirect call promotion
1096 // here, before globalopt. Otherwise imported available_externally functions
1097 // look unreferenced and are removed. If we are going to load the sample
1098 // profile then defer until later.
1099 // TODO: See if we can move later and consolidate with the location where
1100 // we perform ICP when we are loading a sample profile.
1101 // TODO: We pass HasSampleProfile (whether there was a sample profile file
1102 // passed to the compile) to the SamplePGO flag of ICP. This is used to
1103 // determine whether the new direct calls are annotated with prof metadata.
1104 // Ideally this should be determined from whether the IR is annotated with
1105 // sample profile, and not whether a sample profile was provided on the
1106 // command line. E.g. for flattened profiles where we will not be reloading
1107 // the sample profile in the ThinLTO backend, we ideally shouldn't have to
1108 // provide the sample profile file.
1109 if (Phase == ThinOrFullLTOPhase::ThinLTOPostLink && !LoadSampleProfile)
1110 MPM.addPass(PGOIndirectCallPromotion(true /* InLTO */, HasSampleProfile));
1111
1112 // Create an early function pass manager to cleanup the output of the
1113 // frontend. Not necessary with LTO post link pipelines since the pre link
1114 // pipeline already cleaned up the frontend output.
1116 // Do basic inference of function attributes from known properties of system
1117 // libraries and other oracles.
1118 MPM.addPass(InferFunctionAttrsPass());
1119 MPM.addPass(CoroEarlyPass());
1120
1121 FunctionPassManager EarlyFPM;
1122 EarlyFPM.addPass(EntryExitInstrumenterPass(/*PostInlining=*/false));
1123 // Lower llvm.expect to metadata before attempting transforms.
1124 // Compare/branch metadata may alter the behavior of passes like
1125 // SimplifyCFG.
1127 EarlyFPM.addPass(SimplifyCFGPass());
1129 EarlyFPM.addPass(EarlyCSEPass());
1130 if (Level == OptimizationLevel::O3)
1131 EarlyFPM.addPass(CallSiteSplittingPass());
1133 std::move(EarlyFPM), PTO.EagerlyInvalidateAnalyses));
1134 }
1135
1136 if (LoadSampleProfile) {
1137 // Annotate sample profile right after early FPM to ensure freshness of
1138 // the debug info.
1140 PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile, Phase, FS));
1141 // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
1142 // RequireAnalysisPass for PSI before subsequent non-module passes.
1144 // Do not invoke ICP in the LTOPrelink phase as it makes it hard
1145 // for the profile annotation to be accurate in the LTO backend.
1146 if (!isLTOPreLink(Phase))
1147 // We perform early indirect call promotion here, before globalopt.
1148 // This is important for the ThinLTO backend phase because otherwise
1149 // imported available_externally functions look unreferenced and are
1150 // removed.
1151 MPM.addPass(
1152 PGOIndirectCallPromotion(true /* IsInLTO */, true /* SamplePGO */));
1153 }
1154
1155 // Try to perform OpenMP specific optimizations on the module. This is a
1156 // (quick!) no-op if there are no OpenMP runtime calls present in the module.
1158
1160 MPM.addPass(AttributorPass());
1161
1162 // Lower type metadata and the type.test intrinsic in the ThinLTO
1163 // post link pipeline after ICP. This is to enable usage of the type
1164 // tests in ICP sequences.
1166 MPM.addPass(LowerTypeTestsPass(nullptr, nullptr,
1168
1170
1171 // Interprocedural constant propagation now that basic cleanup has occurred
1172 // and prior to optimizing globals.
1173 // FIXME: This position in the pipeline hasn't been carefully considered in
1174 // years, it should be re-analyzed.
1175 MPM.addPass(IPSCCPPass(
1176 IPSCCPOptions(/*AllowFuncSpec=*/
1177 Level != OptimizationLevel::Os &&
1178 Level != OptimizationLevel::Oz &&
1179 !isLTOPreLink(Phase))));
1180
1181 // Attach metadata to indirect call sites indicating the set of functions
1182 // they may target at run-time. This should follow IPSCCP.
1184
1185 // Optimize globals to try and fold them into constants.
1186 MPM.addPass(GlobalOptPass());
1187
1188 // Create a small function pass pipeline to cleanup after all the global
1189 // optimizations.
1190 FunctionPassManager GlobalCleanupPM;
1191 // FIXME: Should this instead be a run of SROA?
1192 GlobalCleanupPM.addPass(PromotePass());
1193 GlobalCleanupPM.addPass(InstCombinePass());
1194 invokePeepholeEPCallbacks(GlobalCleanupPM, Level);
1195 GlobalCleanupPM.addPass(
1196 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
1197 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(GlobalCleanupPM),
1198 PTO.EagerlyInvalidateAnalyses));
1199
1200 // We already asserted this happens in non-FullLTOPostLink earlier.
1201 const bool IsPreLink = Phase != ThinOrFullLTOPhase::ThinLTOPostLink;
1202 // Enable contextual profiling instrumentation.
1203 const bool IsCtxProfGen =
1205 const bool IsPGOPreLink = !IsCtxProfGen && PGOOpt && IsPreLink;
1206 const bool IsPGOInstrGen =
1207 IsPGOPreLink && PGOOpt->Action == PGOOptions::IRInstr;
1208 const bool IsPGOInstrUse =
1209 IsPGOPreLink && PGOOpt->Action == PGOOptions::IRUse;
1210 const bool IsMemprofUse = IsPGOPreLink && !PGOOpt->MemoryProfile.empty();
1211 // We don't want to mix pgo ctx gen and pgo gen; we also don't currently
1212 // enable ctx profiling from the frontend.
1214 "Enabling both instrumented PGO and contextual instrumentation is not "
1215 "supported.");
1216 const bool IsCtxProfUse =
1218
1219 assert(
1221 "--instrument-cold-function-only-path is provided but "
1222 "--pgo-instrument-cold-function-only is not enabled");
1223 const bool IsColdFuncOnlyInstrGen = PGOInstrumentColdFunctionOnly &&
1224 IsPGOPreLink &&
1226
1227 if (IsPGOInstrGen || IsPGOInstrUse || IsMemprofUse || IsCtxProfGen ||
1228 IsCtxProfUse || IsColdFuncOnlyInstrGen)
1229 addPreInlinerPasses(MPM, Level, Phase);
1230
1231 // Add all the requested passes for instrumentation PGO, if requested.
1232 if (IsPGOInstrGen || IsPGOInstrUse) {
1233 addPGOInstrPasses(MPM, Level,
1234 /*RunProfileGen=*/IsPGOInstrGen,
1235 /*IsCS=*/false, PGOOpt->AtomicCounterUpdate,
1236 PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
1237 } else if (IsCtxProfGen || IsCtxProfUse) {
1239 // In pre-link, we just want the instrumented IR. We use the contextual
1240 // profile in the post-thinlink phase.
1241 // The instrumentation will be removed in post-thinlink after IPO.
1242 // FIXME(mtrofin): move AssignGUIDPass if there is agreement to use this
1243 // mechanism for GUIDs.
1244 MPM.addPass(AssignGUIDPass());
1245 if (IsCtxProfUse) {
1246 MPM.addPass(PGOCtxProfFlatteningPass(/*IsPreThinlink=*/true));
1247 return MPM;
1248 }
1249 // Block further inlining in the instrumented ctxprof case. This avoids
1250 // confusingly collecting profiles for the same GUID corresponding to
1251 // different variants of the function. We could do like PGO and identify
1252 // functions by a (GUID, Hash) tuple, but since the ctxprof "use" waits for
1253 // thinlto to happen before performing any further optimizations, it's
1254 // unnecessary to collect profiles for non-prevailing copies.
1256 addPostPGOLoopRotation(MPM, Level);
1258 } else if (IsColdFuncOnlyInstrGen) {
1259 addPGOInstrPasses(MPM, Level, /* RunProfileGen */ true, /* IsCS */ false,
1260 /* AtomicCounterUpdate */ false,
1262 /* ProfileRemappingFile */ "");
1263 }
1264
1265 if (IsPGOInstrGen || IsPGOInstrUse || IsCtxProfGen)
1266 MPM.addPass(PGOIndirectCallPromotion(false, false));
1267
1268 if (IsPGOPreLink && PGOOpt->CSAction == PGOOptions::CSIRInstr)
1269 MPM.addPass(PGOInstrumentationGenCreateVar(PGOOpt->CSProfileGenFile,
1271
1272 if (IsMemprofUse)
1273 MPM.addPass(MemProfUsePass(PGOOpt->MemoryProfile, FS));
1274
1275 if (PGOOpt && (PGOOpt->Action == PGOOptions::IRUse ||
1276 PGOOpt->Action == PGOOptions::SampleUse))
1277 MPM.addPass(PGOForceFunctionAttrsPass(PGOOpt->ColdOptType));
1278
1279 MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/true));
1280
1283 else
1284 MPM.addPass(buildInlinerPipeline(Level, Phase));
1285
1286 // Remove any dead arguments exposed by cleanups, constant folding globals,
1287 // and argument promotion.
1289
1292
1294 MPM.addPass(CoroCleanupPass());
1295
1296 // Optimize globals now that functions are fully simplified.
1297 MPM.addPass(GlobalOptPass());
1298 MPM.addPass(GlobalDCEPass());
1299
1300 return MPM;
1301}
1302
1303/// TODO: Should LTO cause any differences to this set of passes?
1304void PassBuilder::addVectorPasses(OptimizationLevel Level,
1305 FunctionPassManager &FPM, bool IsFullLTO) {
1308
1310 if (IsFullLTO) {
1311 // The vectorizer may have significantly shortened a loop body; unroll
1312 // again. Unroll small loops to hide loop backedge latency and saturate any
1313 // parallel execution resources of an out-of-order processor. We also then
1314 // need to clean up redundancies and loop invariant code.
1315 // FIXME: It would be really good to use a loop-integrated instruction
1316 // combiner for cleanup here so that the unrolling and LICM can be pipelined
1317 // across the loop nests.
1318 // We do UnrollAndJam in a separate LPM to ensure it happens before unroll
1321 LoopUnrollAndJamPass(Level.getSpeedupLevel())));
1323 Level.getSpeedupLevel(), /*OnlyWhenForced=*/!PTO.LoopUnrolling,
1326 // Now that we are done with loop unrolling, be it either by LoopVectorizer,
1327 // or LoopUnroll passes, some variable-offset GEP's into alloca's could have
1328 // become constant-offset, thus enabling SROA and alloca promotion. Do so.
1329 // NOTE: we are very late in the pipeline, and we don't have any LICM
1330 // or SimplifyCFG passes scheduled after us that would clean up
1331 // the CFG mess this may create if allowed to modify the CFG, so forbid that.
1333 }
1334
1335 if (!IsFullLTO) {
1336 // Eliminate loads by forwarding stores from the previous iteration to loads
1337 // of the current iteration.
1339 }
1340 // Cleanup after the loop optimization passes.
1341 FPM.addPass(InstCombinePass());
1342
1343 if (Level.getSpeedupLevel() > 1 && ExtraVectorizerPasses) {
1344 ExtraFunctionPassManager<ShouldRunExtraVectorPasses> ExtraPasses;
1345 // At higher optimization levels, try to clean up any runtime overlap and
1346 // alignment checks inserted by the vectorizer. We want to track correlated
1347 // runtime checks for two inner loops in the same outer loop, fold any
1348 // common computations, hoist loop-invariant aspects out of any outer loop,
1349 // and unswitch the runtime checks if possible. Once hoisted, we may have
1350 // dead (or speculatable) control flows or more combining opportunities.
1351 ExtraPasses.addPass(EarlyCSEPass());
1352 ExtraPasses.addPass(CorrelatedValuePropagationPass());
1353 ExtraPasses.addPass(InstCombinePass());
1354 LoopPassManager LPM;
1355 LPM.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
1356 /*AllowSpeculation=*/true));
1357 LPM.addPass(SimpleLoopUnswitchPass(/* NonTrivial */ Level ==
1359 ExtraPasses.addPass(
1360 createFunctionToLoopPassAdaptor(std::move(LPM), /*UseMemorySSA=*/true,
1361 /*UseBlockFrequencyInfo=*/true));
1362 ExtraPasses.addPass(
1363 SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true)));
1364 ExtraPasses.addPass(InstCombinePass());
1365 FPM.addPass(std::move(ExtraPasses));
1366 }
1367
1368 // Now that we've formed fast to execute loop structures, we do further
1369 // optimizations. These are run afterward as they might block doing complex
1370 // analyses and transforms such as what are needed for loop vectorization.
1371
1372 // Cleanup after loop vectorization, etc. Simplification passes like CVP and
1373 // GVN, loop transforms, and others have already run, so it's now better to
1374 // convert to more optimized IR using more aggressive simplify CFG options.
1375 // The extra sinking transform can create larger basic blocks, so do this
1376 // before SLP vectorization.
1377 FPM.addPass(SimplifyCFGPass(SimplifyCFGOptions()
1378 .forwardSwitchCondToPhi(true)
1379 .convertSwitchRangeToICmp(true)
1380 .convertSwitchToLookupTable(true)
1381 .needCanonicalLoops(false)
1382 .hoistCommonInsts(true)
1383 .sinkCommonInsts(true)));
1384
1385 if (IsFullLTO) {
1386 FPM.addPass(SCCPPass());
1387 FPM.addPass(InstCombinePass());
1388 FPM.addPass(BDCEPass());
1389 }
1390
1391 // Optimize parallel scalar instruction chains into SIMD instructions.
1392 if (PTO.SLPVectorization) {
1393 FPM.addPass(SLPVectorizerPass());
1394 if (Level.getSpeedupLevel() > 1 && ExtraVectorizerPasses) {
1395 FPM.addPass(EarlyCSEPass());
1396 }
1397 }
1398 // Enhance/cleanup vector code.
1399 FPM.addPass(VectorCombinePass());
1400
1401 if (!IsFullLTO) {
1402 FPM.addPass(InstCombinePass());
1403 // Unroll small loops to hide loop backedge latency and saturate any
1404 // parallel execution resources of an out-of-order processor. We also then
1405 // need to clean up redundancies and loop invariant code.
1406 // FIXME: It would be really good to use a loop-integrated instruction
1407 // combiner for cleanup here so that the unrolling and LICM can be pipelined
1408 // across the loop nests.
1409 // We do UnrollAndJam in a separate LPM to ensure it happens before unroll
1410 if (EnableUnrollAndJam && PTO.LoopUnrolling) {
1412 LoopUnrollAndJamPass(Level.getSpeedupLevel())));
1413 }
1414 FPM.addPass(LoopUnrollPass(LoopUnrollOptions(
1415 Level.getSpeedupLevel(), /*OnlyWhenForced=*/!PTO.LoopUnrolling,
1416 PTO.ForgetAllSCEVInLoopUnroll)));
1417 FPM.addPass(WarnMissedTransformationsPass());
1418 // Now that we are done with loop unrolling, be it either by LoopVectorizer,
1419 // or LoopUnroll passes, some variable-offset GEP's into alloca's could have
1420 // become constant-offset, thus enabling SROA and alloca promotion. Do so.
1421 // NOTE: we are very late in the pipeline, and we don't have any LICM
1422 // or SimplifyCFG passes scheduled after us that would clean up
1423 // the CFG mess this may create if allowed to modify the CFG, so forbid that.
1424 FPM.addPass(SROAPass(SROAOptions::PreserveCFG));
1425 }
1426
1427 FPM.addPass(InferAlignmentPass());
1428 FPM.addPass(InstCombinePass());
1429
1430 // This is needed for two reasons:
1431 // 1. It works around problems that instcombine introduces, such as sinking
1432 // expensive FP divides into loops containing multiplications using the
1433 // divide result.
1434 // 2. It helps to clean up some loop-invariant code created by the loop
1435 // unroll pass when IsFullLTO=false.
1437 LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
1438 /*AllowSpeculation=*/true),
1439 /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
1440
1441 // Now that we've vectorized and unrolled loops, we may have more refined
1442 // alignment information, try to re-derive it here.
1443 FPM.addPass(AlignmentFromAssumptionsPass());
1444}
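// Illustrative note: the shape of addVectorPasses can be tuned with flags
// declared at the top of this file, e.g. extra cleanup after vectorization or
// unroll-and-jam before the unroller:
//   opt -passes='default<O3>' -extra-vectorizer-passes -enable-unroll-and-jam \
//       in.ll -S -o out.ll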
1445
1448 ThinOrFullLTOPhase LTOPhase) {
1449 const bool LTOPreLink = isLTOPreLink(LTOPhase);
1451
1452 // Run partial inlining pass to partially inline functions that have
1453 // large bodies.
1456
1457 // Remove avail extern fns and globals definitions since we aren't compiling
1458 // an object file for later LTO. For LTO we want to preserve these so they
1459 // are eligible for inlining at link-time. Note if they are unreferenced they
1460 // will be removed by GlobalDCE later, so this only impacts referenced
1461 // available externally globals. Eventually they will be suppressed during
1462 // codegen, but eliminating here enables more opportunity for GlobalDCE as it
1463 // may make globals referenced by available external functions dead and saves
1464 // running remaining passes on the eliminated functions. These should be
1465 // preserved during prelinking for link-time inlining decisions.
1466 if (!LTOPreLink)
1468
1469 // Do RPO function attribute inference across the module to forward-propagate
1470 // attributes where applicable.
1471 // FIXME: Is this really an optimization rather than a canonicalization?
1473
1474 // Do a post inline PGO instrumentation and use pass. This is a context
1475 // sensitive PGO pass. We don't want to do this in the LTOPreLink phase as
1476 // cross-module inline has not been done yet. The context sensitive
1477 // instrumentation is after all the inlines are done.
1478 if (!LTOPreLink && PGOOpt) {
1479 if (PGOOpt->CSAction == PGOOptions::CSIRInstr)
1480 addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/true,
1481 /*IsCS=*/true, PGOOpt->AtomicCounterUpdate,
1482 PGOOpt->CSProfileGenFile, PGOOpt->ProfileRemappingFile);
1483 else if (PGOOpt->CSAction == PGOOptions::CSIRUse)
1484 addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/false,
1485 /*IsCS=*/true, PGOOpt->AtomicCounterUpdate,
1486 PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
1487 }
1488
1489 // Re-compute GlobalsAA here prior to function passes. This is particularly
1490 // useful as the above will have inlined, DCE'ed, and function-attr
1491 // propagated everything. We should at this point have a reasonably minimal
1492 // and richly annotated call graph. By computing aliasing and mod/ref
1493 // information for all local globals here, the late loop passes and notably
1494 // the vectorizer will be able to use them to help recognize vectorizable
1495 // memory operations.
1498
1499 invokeOptimizerEarlyEPCallbacks(MPM, Level, LTOPhase);
1500
1501 FunctionPassManager OptimizePM;
1502
1503 // Only drop unnecessary assumes post-inline and post-link, as otherwise
1504 // additional uses of the affected value may be introduced through inlining
1505 // and CSE.
1506 if (!isLTOPreLink(LTOPhase))
1507 OptimizePM.addPass(DropUnnecessaryAssumesPass());
1508
1509 // Schedule LoopVersioningLICM once inlining is over, because after that
1510 // we may see more accurate aliasing. The reason to run this late is that
1511 // versioning too early may prevent further inlining due to the increase in
1512 // code size. Other optimizations which run later might benefit from the
1513 // no-alias assumption in the cloned loop.
1515 OptimizePM.addPass(
1517 // LoopVersioningLICM pass might increase new LICM opportunities.
1519 LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
1520 /*AllowSpeculation=*/true),
1521 /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
1522 }
1523
1524 OptimizePM.addPass(Float2IntPass());
1526
1527 if (EnableMatrix) {
1528 OptimizePM.addPass(LowerMatrixIntrinsicsPass());
1529 OptimizePM.addPass(EarlyCSEPass());
1530 }
1531
1532 // CHR pass should only be applied with the profile information.
1533 // The check is to check the profile summary information in CHR.
1534 if (EnableCHR && Level == OptimizationLevel::O3)
1535 OptimizePM.addPass(ControlHeightReductionPass());
1536
1537 // FIXME: We need to run some loop optimizations to re-rotate loops after
1538 // simplifycfg and others undo their rotation.
1539
1540 // Optimize the loop execution. These passes operate on entire loop nests
1541 // rather than on each loop in an inside-out manner, and so they are actually
1542 // function passes.
1543
1544 invokeVectorizerStartEPCallbacks(OptimizePM, Level);
1545
1546 LoopPassManager LPM;
1547 // First rotate loops that may have been un-rotated by prior passes.
1548 // Disable header duplication at -Oz.
1550 Level != OptimizationLevel::Oz,
1551 LTOPreLink));
1552 // Some loops may have become dead by now. Try to delete them.
1553 // FIXME: see discussion in https://reviews.llvm.org/D112851,
1554 // this may need to be revisited once we run GVN before loop deletion
1555 // in the simplification pipeline.
1556 LPM.addPass(LoopDeletionPass());
1557
1558 if (PTO.LoopInterchange)
1559 LPM.addPass(LoopInterchangePass());
1560
1562 std::move(LPM), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/false));
1563
1564 // FIXME: This may not be the right place in the pipeline.
1565 // We need to have the data to support the right place.
1566 if (PTO.LoopFusion)
1567 OptimizePM.addPass(LoopFusePass());
1568
1569 // Distribute loops to allow partial vectorization. I.e. isolate dependences
1570 // into separate loop that would otherwise inhibit vectorization. This is
1571 // currently only performed for loops marked with the metadata
1572 // llvm.loop.distribute=true or when -enable-loop-distribute is specified.
1573 OptimizePM.addPass(LoopDistributePass());
1574
1575 // Populates the VFABI attribute with the scalar-to-vector mappings
1576 // from the TargetLibraryInfo.
1577 OptimizePM.addPass(InjectTLIMappings());
1578
1579 addVectorPasses(Level, OptimizePM, /* IsFullLTO */ false);
1580
1581 invokeVectorizerEndEPCallbacks(OptimizePM, Level);
1582
1583 // LoopSink pass sinks instructions hoisted by LICM, which serves as a
1584 // canonicalization pass that enables other optimizations. As a result,
1585 // LoopSink pass needs to be a very late IR pass to avoid undoing LICM
1586 // result too early.
1587 OptimizePM.addPass(LoopSinkPass());
1588
1589 // And finally clean up LCSSA form before generating code.
1590 OptimizePM.addPass(InstSimplifyPass());
1591
1592 // This hoists/decomposes div/rem ops. It should run after other sink/hoist
1593 // passes to avoid re-sinking, but before SimplifyCFG because it can allow
1594 // flattening of blocks.
1595 OptimizePM.addPass(DivRemPairsPass());
1596
1597 // Try to annotate calls that were created during optimization.
1598 OptimizePM.addPass(
1599 TailCallElimPass(/*UpdateFunctionEntryCount=*/isInstrumentedPGOUse()));
1600
1601 // LoopSink (and other loop passes since the last simplifyCFG) might have
1602 // resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
1603 OptimizePM.addPass(
1605 .convertSwitchRangeToICmp(true)
1606 .speculateUnpredictables(true)
1607 .hoistLoadsStoresWithCondFaulting(true)));
1608
1609 // Add the core optimizing pipeline.
1610 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(OptimizePM),
1611 PTO.EagerlyInvalidateAnalyses));
1612
1613 invokeOptimizerLastEPCallbacks(MPM, Level, LTOPhase);
1614
1615 // Split out cold code. Splitting is done late to avoid hiding context from
1616 // other optimizations and inadvertently regressing performance. The tradeoff
1617 // is that this has a higher code size cost than splitting early.
1618 if (EnableHotColdSplit && !LTOPreLink)
1620
1621 // Search the code for similar regions of code. If enough similar regions can
1622 // be found where extracting the regions into their own function will decrease
1623 // the size of the program, we extract the regions and deduplicate the
1624 // structurally similar regions.
1625 if (EnableIROutliner)
1626 MPM.addPass(IROutlinerPass());
1627
1628 // Now we need to do some global optimization transforms.
1629 // FIXME: It would seem like these should come first in the optimization
1630 // pipeline and maybe be the bottom of the canonicalization pipeline? Weird
1631 // ordering here.
1632 MPM.addPass(GlobalDCEPass());
1634
1635 // Merge functions if requested. It has a better chance to merge functions
1636 // after ConstantMerge folded jump tables.
1637 if (PTO.MergeFunctions)
1639
1640 if (PTO.CallGraphProfile && !LTOPreLink)
1641 MPM.addPass(CGProfilePass(isLTOPostLink(LTOPhase)));
1642
1643 // RelLookupTableConverterPass runs later in LTO post-link pipeline.
1644 if (!LTOPreLink)
1646
1647 return MPM;
1648}
1649
1653 if (Level == OptimizationLevel::O0)
1654 return buildO0DefaultPipeline(Level, Phase);
1655
1657
1658 // Convert @llvm.global.annotations to !annotation metadata.
1660
1661 // Force any function attributes we want the rest of the pipeline to observe.
1663
1664 if (PGOOpt && PGOOpt->DebugInfoForProfiling)
1666
1667 // Apply module pipeline start EP callback.
1669
1670 // Add the core simplification pipeline.
1672
1673 // Now add the optimization pipeline.
1675
1676 if (PGOOpt && PGOOpt->PseudoProbeForProfiling &&
1677 PGOOpt->Action == PGOOptions::SampleUse)
1679
1680 // Emit annotation remarks.
1682
1683 if (isLTOPreLink(Phase))
1684 addRequiredLTOPreLinkPasses(MPM);
1685 return MPM;
1686}
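// Illustrative sketch (an assumption about typical usage, not part of this
// file): the same default pipeline is reachable through the textual pipeline
// syntax, which is convenient for tests and tools.
#if 0
  PassBuilder PB;      // assumes analyses are registered as usual
  ModulePassManager MPM;
  if (Error Err = PB.parsePassPipeline(MPM, "default<O2>"))
    errs() << toString(std::move(Err)) << "\n";
#endif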
1687
1690 bool EmitSummary) {
1692 if (ThinLTO)
1694 else
1696 MPM.addPass(EmbedBitcodePass(ThinLTO, EmitSummary));
1697
1698 // Perform any cleanups to the IR that aren't suitable for per TU compilation,
1699 // like removing CFI/WPD related instructions. Note, we reuse
1700 // LowerTypeTestsPass to clean up type tests rather than duplicate that logic
1701 // in FatLtoCleanup.
1702 MPM.addPass(FatLtoCleanup());
1703
1704 // If we're doing FatLTO w/ CFI enabled, we don't want the type tests in the
1705 // object code, only in the bitcode section, so drop it before we run
1706 // module optimization and generate machine code. If llvm.type.test() isn't in
1707 // the IR, this won't do anything.
1708 MPM.addPass(
1710
1711 // Use the ThinLTO post-link pipeline with sample profiling
1712 if (ThinLTO && PGOOpt && PGOOpt->Action == PGOOptions::SampleUse)
1713 MPM.addPass(buildThinLTODefaultPipeline(Level, /*ImportSummary=*/nullptr));
1714 else {
1715 // ModuleSimplification does not run the coroutine passes for
1716 // ThinLTOPreLink, so we need the coroutine passes to run for ThinLTO
1717 // builds, otherwise they will miscompile.
1718 if (ThinLTO) {
1719 // TODO: replace w/ buildCoroWrapper() when it takes phase and level into
1720 // consideration.
1721 CGSCCPassManager CGPM;
1725 MPM.addPass(CoroCleanupPass());
1726 }
1727
1728 // otherwise, just use module optimization
1729 MPM.addPass(
1731 // Emit annotation remarks.
1733 }
1734 return MPM;
1735}
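// Illustrative note: the FatLTO pipeline above is typically reached through
// the clang driver's -ffat-lto-objects (optionally combined with -flto=thin),
// which embeds bitcode alongside the object code, e.g.:
//   clang -O2 -flto=thin -ffat-lto-objects -c main.c -o main.o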
1736
1739 if (Level == OptimizationLevel::O0)
1741
1743
1744 // Convert @llvm.global.annotations to !annotation metadata.
1746
1747 // Force any function attributes we want the rest of the pipeline to observe.
1749
1750 if (PGOOpt && PGOOpt->DebugInfoForProfiling)
1752
1753 // Apply module pipeline start EP callback.
1755
1756 // If we are planning to perform ThinLTO later, we don't bloat the code with
1757 // unrolling/vectorization/... now. Just simplify the module as much as we
1758 // can.
1761 // In pre-link, for ctx prof use, we stop here with an instrumented IR. We let
1762 // thinlto use the contextual info to perform imports; then use the contextual
1763 // profile in the post-thinlink phase.
1764 if (!UseCtxProfile.empty()) {
1765 addRequiredLTOPreLinkPasses(MPM);
1766 return MPM;
1767 }
1768
1769 // Run partial inlining pass to partially inline functions that have
1770 // large bodies.
1771 // FIXME: It isn't clear whether this is really the right place to run this
1772 // in ThinLTO. Because there is another canonicalization and simplification
1773 // phase that will run after the thin link, running this here ends up with
1774 // less information than will be available later and it may grow functions in
1775 // ways that aren't beneficial.
1776 if (RunPartialInlining)
1777 MPM.addPass(PartialInlinerPass());
1778
1779 if (PGOOpt && PGOOpt->PseudoProbeForProfiling &&
1780 PGOOpt->Action == PGOOptions::SampleUse)
1781 MPM.addPass(PseudoProbeUpdatePass());
1782
1783 // Handle Optimizer{Early,Last}EPCallbacks added by clang on PreLink. Actual
1784 // optimization is going to be done in PostLink stage, but clang can't add
1785 // callbacks there in case of in-process ThinLTO called by linker.
1786 invokeOptimizerEarlyEPCallbacks(MPM, Level,
1787 /*Phase=*/ThinOrFullLTOPhase::ThinLTOPreLink);
1788 invokeOptimizerLastEPCallbacks(MPM, Level,
1789 /*Phase=*/ThinOrFullLTOPhase::ThinLTOPreLink);
1790
1791 // Emit annotation remarks.
1792 addAnnotationRemarksPass(MPM);
1793
1794 addRequiredLTOPreLinkPasses(MPM);
1795
1796 return MPM;
1797}
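// Editor's note: hedged sketch of the compile-step half of ThinLTO, reusing
// the PB/M/MAM setup sketched earlier; the matching post-link pipeline is
// sketched after buildThinLTODefaultPipeline() below.
llvm::ModulePassManager PreLinkMPM =
    PB.buildThinLTOPreLinkDefaultPipeline(llvm::OptimizationLevel::O2);
PreLinkMPM.run(M, MAM);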
1798
1799 ModulePassManager PassBuilder::buildThinLTODefaultPipeline(
1800 OptimizationLevel Level, const ModuleSummaryIndex *ImportSummary) {
1801 ModulePassManager MPM;
1802
1803 if (ImportSummary) {
1804 // For ThinLTO we must apply the context disambiguation decisions early, to
1805 // ensure we can correctly match the callsites to summary data.
1806 if (EnableMemProfContextDisambiguation)
1807 MPM.addPass(MemProfContextDisambiguation(
1808 ImportSummary, PGOOpt && PGOOpt->Action == PGOOptions::SampleUse));
1809
1810 // These passes import type identifier resolutions for whole-program
1811 // devirtualization and CFI. They must run early because other passes may
1812 // disturb the specific instruction patterns that these passes look for,
1813 // creating dependencies on resolutions that may not appear in the summary.
1814 //
1815 // For example, GVN may transform the pattern assume(type.test) appearing in
1816 // two basic blocks into assume(phi(type.test, type.test)), which would
1817 // transform a dependency on a WPD resolution into a dependency on a type
1818 // identifier resolution for CFI.
1819 //
1820 // Also, WPD has access to more precise information than ICP and can
1821 // devirtualize more effectively, so it should operate on the IR first.
1822 //
1823 // The WPD and LowerTypeTest passes need to run at -O0 to lower type
1824 // metadata and intrinsics.
1825 MPM.addPass(WholeProgramDevirtPass(nullptr, ImportSummary));
1826 MPM.addPass(LowerTypeTestsPass(nullptr, ImportSummary));
1827 }
1828
1829 if (Level == OptimizationLevel::O0) {
1830 // Run a second time to clean up any type tests left behind by WPD for use
1831 // in ICP.
1832 MPM.addPass(LowerTypeTestsPass(nullptr, nullptr,
1833 lowertypetests::DropTestKind::Assume));
1834
1835 // Drop available_externally and unreferenced globals. This is necessary
1836 // with ThinLTO in order to avoid leaving undefined references to dead
1837 // globals in the object file.
1838 MPM.addPass(EliminateAvailableExternallyPass());
1839 MPM.addPass(GlobalDCEPass());
1840 return MPM;
1841 }
1842 if (!UseCtxProfile.empty()) {
1843 MPM.addPass(
1845 } else {
1846 // Add the core simplification pipeline.
1847 MPM.addPass(buildModuleSimplificationPipeline(
1848 Level, ThinOrFullLTOPhase::ThinLTOPostLink));
1849 }
1850 // Now add the optimization pipeline.
1851 MPM.addPass(buildModuleOptimizationPipeline(
1852 Level, ThinOrFullLTOPhase::ThinLTOPostLink));
1853
1854 // Emit annotation remarks.
1855 addAnnotationRemarksPass(MPM);
1856
1857 return MPM;
1858}
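// Editor's note: hedged sketch of the backend half of ThinLTO, reusing the
// PB/M/MAM setup sketched earlier. `CombinedIndex` is an assumed
// ModuleSummaryIndex produced by the thin link; it is not defined in this
// file.
llvm::ModulePassManager PostLinkMPM = PB.buildThinLTODefaultPipeline(
    llvm::OptimizationLevel::O2, &CombinedIndex);
PostLinkMPM.run(M, MAM);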
1859
1860 ModulePassManager
1861 PassBuilder::buildLTOPreLinkDefaultPipeline(OptimizationLevel Level) {
1862 // FIXME: We should use a customized pre-link pipeline!
1863 return buildPerModuleDefaultPipeline(Level,
1864 ThinOrFullLTOPhase::FullLTOPreLink);
1865 }
1866
1867 ModulePassManager
1868 PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
1869 ModuleSummaryIndex *ExportSummary) {
1870 ModulePassManager MPM;
1871
1872 invokeFullLinkTimeOptimizationEarlyEPCallbacks(MPM, Level);
1873
1874 // Create a function that performs CFI checks for cross-DSO calls with targets
1875 // in the current module.
1876 MPM.addPass(CrossDSOCFIPass());
1877
1878 if (Level == OptimizationLevel::O0) {
1879 // The WPD and LowerTypeTest passes need to run at -O0 to lower type
1880 // metadata and intrinsics.
1881 MPM.addPass(WholeProgramDevirtPass(ExportSummary, nullptr));
1882 MPM.addPass(LowerTypeTestsPass(ExportSummary, nullptr));
1883 // Run a second time to clean up any type tests left behind by WPD for use
1884 // in ICP.
1885 MPM.addPass(LowerTypeTestsPass(nullptr, nullptr,
1886 lowertypetests::DropTestKind::Assume));
1887
1889
1891
1892 // Emit annotation remarks.
1893 addAnnotationRemarksPass(MPM);
1894
1895 return MPM;
1896 }
1897
1898 if (PGOOpt && PGOOpt->Action == PGOOptions::SampleUse) {
1899 // Load sample profile before running the LTO optimization pipeline.
1900 MPM.addPass(SampleProfileLoaderPass(PGOOpt->ProfileFile,
1901 PGOOpt->ProfileRemappingFile,
1902 ThinOrFullLTOPhase::FullLTOPostLink));
1903 // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
1904 // RequireAnalysisPass for PSI before subsequent non-module passes.
1905 MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
1906 }
1907
1908 // Try to run OpenMP optimizations; this is a quick no-op if no OpenMP metadata is present.
1909 MPM.addPass(OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink));
1910
1911 // Remove unused virtual tables to improve the quality of code generated by
1912 // whole-program devirtualization and bitset lowering.
1913 MPM.addPass(GlobalDCEPass(/*InLTOPostLink=*/true));
1914
1915 // Do basic inference of function attributes from known properties of system
1916 // libraries and other oracles.
1917 MPM.addPass(InferFunctionAttrsPass());
1918
1919 if (Level.getSpeedupLevel() > 1) {
1920 MPM.addPass(createModuleToFunctionPassAdaptor(
1921 CallSiteSplittingPass(), PTO.EagerlyInvalidateAnalyses));
1922
1923 // Indirect call promotion. This should promote all the targets that are
1924 // left by the earlier promotion pass that promotes intra-module targets.
1925 // This two-step promotion is intended to save compile time. For LTO, it should
1926 // produce the same result as if we only do promotion here.
1927 MPM.addPass(PGOIndirectCallPromotion(
1928 true /* InLTO */, PGOOpt && PGOOpt->Action == PGOOptions::SampleUse));
1929
1930 // Promoting by-reference arguments to by-value exposes more constants to
1931 // IPSCCP.
1932 CGSCCPassManager CGPM;
1933 CGPM.addPass(PostOrderFunctionAttrsPass());
1934 CGPM.addPass(ArgumentPromotionPass());
1935 CGPM.addPass(
1936 createCGSCCToFunctionPassAdaptor(SROAPass(SROAOptions::ModifyCFG)));
1937 MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
1938
1939 // Propagate constants at call sites into the functions they call. This
1940 // opens opportunities for globalopt (and inlining) by substituting function
1941 // pointers passed as arguments to direct uses of functions.
1942 MPM.addPass(IPSCCPPass(IPSCCPOptions(/*AllowFuncSpec=*/
1943 Level != OptimizationLevel::Os &&
1944 Level != OptimizationLevel::Oz)));
1945
1946 // Attach metadata to indirect call sites indicating the set of functions
1947 // they may target at run-time. This should follow IPSCCP.
1948 MPM.addPass(CalledValuePropagationPass());
1949 }
1950
1951 // Do RPO function attribute inference across the module to forward-propagate
1952 // attributes where applicable.
1953 // FIXME: Is this really an optimization rather than a canonicalization?
1954 MPM.addPass(ReversePostOrderFunctionAttrsPass());
1955
1956 // Use in-range annotations on GEP indices to split globals where beneficial.
1957 MPM.addPass(GlobalSplitPass());
1958
1959 // Run whole program optimization of virtual call when the list of callees
1960 // is fixed.
1961 MPM.addPass(WholeProgramDevirtPass(ExportSummary, nullptr));
1962
1963 // Stop here at -O1.
1964 if (Level == OptimizationLevel::O1) {
1965 // The LowerTypeTestsPass needs to run to lower type metadata and the
1966 // type.test intrinsics. The pass does nothing if CFI is disabled.
1967 MPM.addPass(LowerTypeTestsPass(ExportSummary, nullptr));
1968 // Run a second time to clean up any type tests left behind by WPD for use
1969 // in ICP (which is performed earlier than this in the regular LTO
1970 // pipeline).
1971 MPM.addPass(LowerTypeTestsPass(nullptr, nullptr,
1972 lowertypetests::DropTestKind::Assume));
1973
1975
1977
1978 // Emit annotation remarks.
1979 addAnnotationRemarksPass(MPM);
1980
1981 return MPM;
1982 }
1983
1984 // TODO: Skip to match buildCoroWrapper.
1985 MPM.addPass(CoroEarlyPass());
1986
1987 // Optimize globals to try and fold them into constants.
1988 MPM.addPass(GlobalOptPass());
1989
1990 // Promote any localized globals to SSA registers.
1991 MPM.addPass(createModuleToFunctionPassAdaptor(PromotePass()));
1992
1993 // Linking modules together can lead to duplicate global constants; only
1994 // keep one copy of each constant.
1995 MPM.addPass(ConstantMergePass());
1996
1997 // Remove unused arguments from functions.
1998 MPM.addPass(DeadArgumentEliminationPass());
1999
2000 // Reduce the code after globalopt and ipsccp. Both can open up significant
2001 // simplification opportunities, and both can propagate functions through
2002 // function pointers. When this happens, we often have to resolve varargs
2003 // calls, etc, so let instcombine do this.
2004 FunctionPassManager PeepholeFPM;
2005 PeepholeFPM.addPass(InstCombinePass());
2006 if (Level.getSpeedupLevel() > 1)
2007 PeepholeFPM.addPass(AggressiveInstCombinePass());
2008 invokePeepholeEPCallbacks(PeepholeFPM, Level);
2009
2010 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(PeepholeFPM),
2011 PTO.EagerlyInvalidateAnalyses));
2012
2013 // Lower variadic functions for supported targets prior to inlining.
2014 MPM.addPass(ExpandVariadicsPass(ExpandVariadicsMode::Optimize));
2015
2016 // Note: historically, the PruneEH pass was run first to deduce nounwind and
2017 // generally clean up exception handling overhead. It isn't clear this is
2018 // valuable as the inliner doesn't currently care whether it is inlining an
2019 // invoke or a call.
2020 // Run the inliner now.
2021 if (EnableModuleInliner) {
2025 } else {
2028 /* MandatoryFirst */ true,
2031 }
2032
2033 // Perform context disambiguation after inlining, since that would reduce the
2034 // amount of additional cloning required to distinguish the allocation
2035 // contexts.
2036 if (EnableMemProfContextDisambiguation)
2037 MPM.addPass(MemProfContextDisambiguation(
2038 /*Summary=*/nullptr,
2039 PGOOpt && PGOOpt->Action == PGOOptions::SampleUse));
2040
2041 // Optimize globals again after we ran the inliner.
2042 MPM.addPass(GlobalOptPass());
2043
2044 // Run the OpenMPOpt pass again after global optimizations.
2045 MPM.addPass(OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink));
2046
2047 // Garbage collect dead functions.
2048 MPM.addPass(GlobalDCEPass(/*InLTOPostLink=*/true));
2049
2050 // If we didn't decide to inline a function, check to see if we can
2051 // transform it to pass arguments by value instead of by reference.
2052 CGSCCPassManager CGPM;
2057
2058 FunctionPassManager FPM;
2059 // The IPO Passes may leave cruft around. Clean up after them.
2060 FPM.addPass(InstCombinePass());
2061 invokePeepholeEPCallbacks(FPM, Level);
2062
2065
2067
2068 // Do a post inline PGO instrumentation and use pass. This is a context
2069 // sensitive PGO pass.
2070 if (PGOOpt) {
2071 if (PGOOpt->CSAction == PGOOptions::CSIRInstr)
2072 addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/true,
2073 /*IsCS=*/true, PGOOpt->AtomicCounterUpdate,
2074 PGOOpt->CSProfileGenFile, PGOOpt->ProfileRemappingFile);
2075 else if (PGOOpt->CSAction == PGOOptions::CSIRUse)
2076 addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/false,
2077 /*IsCS=*/true, PGOOpt->AtomicCounterUpdate,
2078 PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
2079 }
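// Editor's note (added comment): this block is the second, context-sensitive
// round of IR PGO. As far as the editor understands the usual flow, a build is
// first made with an ordinary instrumented profile, then rebuilt with that
// profile plus -fcs-profile-generate so counters are inserted here (after LTO
// inlining); the raw profiles are merged with llvm-profdata, and the merged
// file feeds the CSIRUse path above.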
2080
2081 // Break up allocas
2082 FPM.addPass(SROAPass(SROAOptions::ModifyCFG));
2083
2084 // LTO provides additional opportunities for tailcall elimination due to
2085 // link-time inlining, and visibility of nocapture attribute.
2086 FPM.addPass(
2087 TailCallElimPass(/*UpdateFunctionEntryCount=*/isInstrumentedPGOUse()));
2088
2089 // Run a few AA driver optimizations here and now to cleanup the code.
2090 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM),
2091 PTO.EagerlyInvalidateAnalyses));
2092
2093 MPM.addPass(
2094 createModuleToPostOrderCGSCCPassAdaptor(PostOrderFunctionAttrsPass()));
2095
2096 // Require the GlobalsAA analysis for the module so we can query it within
2097 // MainFPM.
2098 if (EnableGlobalAnalyses) {
2099 MPM.addPass(RequireAnalysisPass<GlobalsAA, Module>());
2100 // Invalidate AAManager so it can be recreated and pick up the newly
2101 // available GlobalsAA.
2102 MPM.addPass(
2103 createModuleToFunctionPassAdaptor(InvalidateAnalysisPass<AAManager>()));
2104 }
2105
2106 FunctionPassManager MainFPM;
2107 MainFPM.addPass(createFunctionToLoopPassAdaptor(
2108 LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap,
2109 /*AllowSpeculation=*/true),
2110 /*UseMemorySSA=*/true, /*UseBlockFrequencyInfo=*/false));
2111
2112 if (RunNewGVN)
2113 MainFPM.addPass(NewGVNPass());
2114 else
2115 MainFPM.addPass(GVNPass());
2116
2117 // Remove dead memcpy()'s.
2118 MainFPM.addPass(MemCpyOptPass());
2119
2120 // Nuke dead stores.
2121 MainFPM.addPass(DSEPass());
2122 MainFPM.addPass(MoveAutoInitPass());
2123 MainFPM.addPass(MergedLoadStoreMotionPass());
2124
2125 invokeVectorizerStartEPCallbacks(MainFPM, Level);
2126
2127 LoopPassManager LPM;
2128 if (EnableLoopFlatten && Level.getSpeedupLevel() > 1)
2129 LPM.addPass(LoopFlattenPass());
2130 LPM.addPass(IndVarSimplifyPass());
2131 LPM.addPass(LoopDeletionPass());
2132 // FIXME: Add loop interchange.
2133
2134 // Unroll small loops and perform peeling.
2135 LPM.addPass(LoopFullUnrollPass(Level.getSpeedupLevel(),
2136 /* OnlyWhenForced= */ !PTO.LoopUnrolling,
2137 PTO.ForgetAllSCEVInLoopUnroll));
2138 // The loop passes in LPM (LoopFullUnrollPass) do not preserve MemorySSA.
2139 // *All* loop passes must preserve it, in order to be able to use it.
2140 MainFPM.addPass(createFunctionToLoopPassAdaptor(
2141 std::move(LPM), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/true));
2142
2143 MainFPM.addPass(LoopDistributePass());
2144
2145 addVectorPasses(Level, MainFPM, /* IsFullLTO */ true);
2146
2147 invokeVectorizerEndEPCallbacks(MainFPM, Level);
2148
2149 // Run the OpenMPOpt CGSCC pass again late.
2150 MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
2151 OpenMPOptCGSCCPass(ThinOrFullLTOPhase::FullLTOPostLink)));
2152
2153 invokePeepholeEPCallbacks(MainFPM, Level);
2154 MainFPM.addPass(JumpThreadingPass());
2155 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(MainFPM),
2156 PTO.EagerlyInvalidateAnalyses));
2157
2158 // Lower type metadata and the type.test intrinsic. This pass supports
2159 // clang's control flow integrity mechanisms (-fsanitize=cfi*) and needs
2160 // to be run at link time if CFI is enabled. This pass does nothing if
2161 // CFI is disabled.
2162 MPM.addPass(LowerTypeTestsPass(ExportSummary, nullptr));
2163 // Run a second time to clean up any type tests left behind by WPD for use
2164 // in ICP (which is performed earlier than this in the regular LTO pipeline).
2165 MPM.addPass(LowerTypeTestsPass(nullptr, nullptr,
2166 lowertypetests::DropTestKind::Assume));
2167
2168 // Enable splitting late in the FullLTO post-link pipeline.
2169 if (EnableHotColdSplit)
2170 MPM.addPass(HotColdSplittingPass());
2171
2172 // Add late LTO optimization passes.
2173 FunctionPassManager LateFPM;
2174
2175 // LoopSink pass sinks instructions hoisted by LICM, which serves as a
2176 // canonicalization pass that enables other optimizations. As a result,
2177 // LoopSink pass needs to be a very late IR pass to avoid undoing LICM
2178 // results too early.
2179 LateFPM.addPass(LoopSinkPass());
2180
2181 // This hoists/decomposes div/rem ops. It should run after other sink/hoist
2182 // passes to avoid re-sinking, but before SimplifyCFG because it can allow
2183 // flattening of blocks.
2184 LateFPM.addPass(DivRemPairsPass());
2185
2186 // Delete basic blocks, which optimization passes may have killed.
2187 LateFPM.addPass(SimplifyCFGPass(SimplifyCFGOptions()
2188 .convertSwitchRangeToICmp(true)
2189 .hoistCommonInsts(true)
2190 .speculateUnpredictables(true)));
2191 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(LateFPM)));
2192
2193 // Drop bodies of available externally objects to improve GlobalDCE.
2194 MPM.addPass(EliminateAvailableExternallyPass());
2195
2196 // Now that we have optimized the program, discard unreachable functions.
2197 MPM.addPass(GlobalDCEPass(/*InLTOPostLink=*/true));
2198
2199 if (PTO.MergeFunctions)
2200 MPM.addPass(MergeFunctionsPass());
2201
2202 MPM.addPass(RelLookupTableConverterPass());
2203
2204 if (PTO.CallGraphProfile)
2205 MPM.addPass(CGProfilePass(/*InLTOPostLink=*/true));
2206
2207 MPM.addPass(CoroCleanupPass());
2208
2209 invokeFullLinkTimeOptimizationLastEPCallbacks(MPM, Level);
2210
2211 // Emit annotation remarks.
2212 addAnnotationRemarksPass(MPM);
2213
2214 return MPM;
2215}
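// Editor's note: hedged sketch of the regular (non-Fat) LTO pairing, reusing
// the PB/M/MAM setup sketched earlier. The pre-link pipeline runs once per TU;
// the post-link pipeline runs over the linker-merged module. `ExportIndex` is
// an assumed ModuleSummaryIndex owned by the LTO driver; nullptr should also
// be accepted when no combined summary is available.
llvm::ModulePassManager LTOPreMPM =
    PB.buildLTOPreLinkDefaultPipeline(llvm::OptimizationLevel::O2);
llvm::ModulePassManager LTOPostMPM =
    PB.buildLTODefaultPipeline(llvm::OptimizationLevel::O2, &ExportIndex);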
2216
2217 ModulePassManager
2218 PassBuilder::buildO0DefaultPipeline(OptimizationLevel Level,
2219 ThinOrFullLTOPhase Phase) {
2220 assert(Level == OptimizationLevel::O0 &&
2221 "buildO0DefaultPipeline should only be used with O0");
2222
2223 ModulePassManager MPM;
2224
2225 // Perform pseudo probe instrumentation in O0 mode. This is for
2226 // consistency between different build modes. For example, an LTO build can be
2227 // mixed with an O0 prelink and an O2 postlink. Loading a sample profile in
2228 // the postlink will require pseudo probe instrumentation in the prelink.
2229 if (PGOOpt && PGOOpt->PseudoProbeForProfiling)
2230 MPM.addPass(SampleProfileProbePass(TM));
2231
2232 if (PGOOpt && (PGOOpt->Action == PGOOptions::IRInstr ||
2233 PGOOpt->Action == PGOOptions::IRUse))
2234 addPGOInstrPassesForO0(
2235 MPM,
2236 /*RunProfileGen=*/(PGOOpt->Action == PGOOptions::IRInstr),
2237 /*IsCS=*/false, PGOOpt->AtomicCounterUpdate, PGOOpt->ProfileFile,
2238 PGOOpt->ProfileRemappingFile);
2239
2240 // Instrument function entry and exit before all inlining.
2241 MPM.addPass(createModuleToFunctionPassAdaptor(
2242 EntryExitInstrumenterPass(/*PostInlining=*/false)));
2243
2244 invokePipelineStartEPCallbacks(MPM, Level);
2245
2246 if (PGOOpt && PGOOpt->DebugInfoForProfiling)
2247 MPM.addPass(createModuleToFunctionPassAdaptor(AddDiscriminatorsPass()));
2248
2249 if (PGOOpt && PGOOpt->Action == PGOOptions::SampleUse) {
2250 // Explicitly disable sample loader inlining and use flattened profile in O0
2251 // pipeline.
2252 MPM.addPass(SampleProfileLoaderPass(PGOOpt->ProfileFile,
2253 PGOOpt->ProfileRemappingFile,
2254 ThinOrFullLTOPhase::None, nullptr,
2255 /*DisableSampleProfileInlining=*/true,
2256 /*UseFlattenedProfile=*/true));
2257 // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
2258 // RequireAnalysisPass for PSI before subsequent non-module passes.
2259 MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
2260 }
2261
2262 invokePipelineEarlySimplificationEPCallbacks(MPM, Level, Phase);
2263
2264 // Build a minimal pipeline based on the semantics required by LLVM,
2265 // which is just that always inlining occurs. Further, disable generating
2266 // lifetime intrinsics to avoid enabling further optimizations during
2267 // code generation.
2268 MPM.addPass(AlwaysInlinerPass(
2269 /*InsertLifetimeIntrinsics=*/false));
2270
2271 if (PTO.MergeFunctions)
2272 MPM.addPass(MergeFunctionsPass());
2273
2274 if (EnableMatrix)
2275 MPM.addPass(
2276 createModuleToFunctionPassAdaptor(LowerMatrixIntrinsicsPass(true)));
2277
2278 if (!CGSCCOptimizerLateEPCallbacks.empty()) {
2279 CGSCCPassManager CGPM;
2280 invokeCGSCCOptimizerLateEPCallbacks(CGPM, Level);
2281 if (!CGPM.isEmpty())
2282 MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
2283 }
2284 if (!LateLoopOptimizationsEPCallbacks.empty()) {
2285 LoopPassManager LPM;
2286 invokeLateLoopOptimizationsEPCallbacks(LPM, Level);
2287 if (!LPM.isEmpty()) {
2288 MPM.addPass(createModuleToFunctionPassAdaptor(
2289 createFunctionToLoopPassAdaptor(std::move(LPM))));
2290 }
2291 }
2292 if (!LoopOptimizerEndEPCallbacks.empty()) {
2293 LoopPassManager LPM;
2294 invokeLoopOptimizerEndEPCallbacks(LPM, Level);
2295 if (!LPM.isEmpty()) {
2296 MPM.addPass(createModuleToFunctionPassAdaptor(
2297 createFunctionToLoopPassAdaptor(std::move(LPM))));
2298 }
2299 }
2300 if (!ScalarOptimizerLateEPCallbacks.empty()) {
2301 FunctionPassManager FPM;
2302 invokeScalarOptimizerLateEPCallbacks(FPM, Level);
2303 if (!FPM.isEmpty())
2304 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
2305 }
2306
2308
2309 if (!VectorizerStartEPCallbacks.empty()) {
2310 FunctionPassManager FPM;
2311 invokeVectorizerStartEPCallbacks(FPM, Level);
2312 if (!FPM.isEmpty())
2313 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
2314 }
2315
2316 if (!VectorizerEndEPCallbacks.empty()) {
2317 FunctionPassManager FPM;
2318 invokeVectorizerEndEPCallbacks(FPM, Level);
2319 if (!FPM.isEmpty())
2320 MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
2321 }
2322
2324
2326
2327 if (isLTOPreLink(Phase))
2328 addRequiredLTOPreLinkPasses(MPM);
2329
2331
2332 return MPM;
2333}
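// Editor's note: hedged sketch showing that the O0 pipeline is what the
// textual pipeline "default<O0>" expands to, reusing the PB setup sketched
// earlier; parsePassPipeline returns llvm::Error, which must be consumed.
llvm::ModulePassManager O0MPM =
    PB.buildO0DefaultPipeline(llvm::OptimizationLevel::O0);
llvm::ModulePassManager ParsedMPM;
if (llvm::Error Err = PB.parsePassPipeline(ParsedMPM, "default<O0>"))
  llvm::report_fatal_error(std::move(Err));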
2334
2335 AAManager PassBuilder::buildDefaultAAPipeline() {
2336 AAManager AA;
2337
2338 // The order in which these are registered determines their priority when
2339 // being queried.
2340
2341 // Add any target-specific alias analyses that should be run early.
2342 if (TM)
2343 TM->registerEarlyDefaultAliasAnalyses(AA);
2344
2345 // First we register the basic alias analysis that provides the majority of
2346 // per-function local AA logic. This is a stateless, on-demand local set of
2347 // AA techniques.
2348 AA.registerFunctionAnalysis<BasicAA>();
2349
2350 // Next we query fast, specialized alias analyses that wrap IR-embedded
2351 // information about aliasing.
2352 AA.registerFunctionAnalysis<ScopedNoAliasAA>();
2353 AA.registerFunctionAnalysis<TypeBasedAA>();
2354
2355 // Add support for querying global aliasing information when available.
2356 // Because the `AAManager` is a function analysis and `GlobalsAA` is a module
2357 // analysis, all that the `AAManager` can do is query for any *cached*
2358 // results from `GlobalsAA` through a readonly proxy.
2359 if (EnableGlobalAnalyses)
2360 AA.registerModuleAnalysis<GlobalsAA>();
2361
2362 // Add target-specific alias analyses.
2363 if (TM)
2364 TM->registerDefaultAliasAnalyses(AA);
2365
2366 return AA;
2367}
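// Editor's note: hedged sketch of wiring the default AA stack into a
// FunctionAnalysisManager; registering the AAManager before the other
// function analyses is the usual PassBuilder client pattern.
llvm::FunctionAnalysisManager FAM;
FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
PB.registerFunctionAnalyses(FAM);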
2368
2369bool PassBuilder::isInstrumentedPGOUse() const {
2370 return (PGOOpt && PGOOpt->Action == PGOOptions::IRUse) ||
2371 !UseCtxProfile.empty();
2372}