LLVM 19.0.0git
InlineModelFeatureMaps.h
//===- InlineModelFeatureMaps.h - common model runner defs ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
#define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H

#include "llvm/Analysis/TensorSpec.h"

#include <array>
#include <vector>

namespace llvm {

// List of cost features. A "cost" feature is a summand of the heuristic-based
// inline cost, and we define them separately to preserve the original heuristic
// behavior.
#define INLINE_COST_FEATURE_ITERATOR(M) \
  M(int64_t, {1}, sroa_savings, \
    "Savings from SROA (scalar replacement of aggregates)") \
  M(int64_t, {1}, sroa_losses, \
    "Losses from SROA (scalar replacement of aggregates)") \
  M(int64_t, {1}, load_elimination, "Cost of load elimination in the call") \
  M(int64_t, {1}, call_penalty, \
    "Accumulation of penalty applied to call sites when inlining") \
  M(int64_t, {1}, call_argument_setup, \
    "Accumulation of call argument setup costs") \
  M(int64_t, {1}, load_relative_intrinsic, \
    "Accumulation of costs of loading relative intrinsics") \
  M(int64_t, {1}, lowered_call_arg_setup, \
    "Accumulation of cost of lowered call argument setups") \
  M(int64_t, {1}, indirect_call_penalty, \
    "Accumulation of costs for indirect calls") \
  M(int64_t, {1}, jump_table_penalty, "Accumulation of costs for jump tables") \
  M(int64_t, {1}, case_cluster_penalty, \
    "Accumulation of costs for case clusters") \
  M(int64_t, {1}, switch_default_dest_penalty, \
    "Accumulation of costs for switch default destination") \
  M(int64_t, {1}, switch_penalty, \
    "Accumulation of costs for switch statements") \
  M(int64_t, {1}, unsimplified_common_instructions, \
    "Costs from unsimplified common instructions") \
  M(int64_t, {1}, num_loops, "Number of loops in the caller") \
  M(int64_t, {1}, dead_blocks, "Number of dead blocks in the caller") \
  M(int64_t, {1}, simplified_instructions, \
    "Number of simplified instructions") \
  M(int64_t, {1}, constant_args, \
    "Number of constant arguments in the call site") \
  M(int64_t, {1}, constant_offset_ptr_args, \
    "Number of constant offset pointer args in the call site") \
  M(int64_t, {1}, callsite_cost, "Estimated cost of the call site") \
  M(int64_t, {1}, cold_cc_penalty, "Penalty for a cold calling convention") \
  M(int64_t, {1}, last_call_to_static_bonus, \
    "Bonus for being the last call to static") \
  M(int64_t, {1}, is_multiple_blocks, \
    "Boolean; is the Callee multiple blocks") \
  M(int64_t, {1}, nested_inlines, \
    "Would the default inliner perform nested inlining") \
  M(int64_t, {1}, nested_inline_cost_estimate, \
    "Estimate of the accumulated cost of nested inlines") \
  M(int64_t, {1}, threshold, "Threshold for the heuristic inliner")

// clang-format off
enum class InlineCostFeatureIndex : size_t {
#define POPULATE_INDICES(DTYPE, SHAPE, NAME, DOC) NAME,
  INLINE_COST_FEATURE_ITERATOR(POPULATE_INDICES)
#undef POPULATE_INDICES

  NumberOfFeatures
};
// clang-format on

using InlineCostFeatures =
    std::array<int,
               static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures)>;

constexpr bool isHeuristicInlineCostFeature(InlineCostFeatureIndex Feature) {
  return Feature != InlineCostFeatureIndex::sroa_savings &&
         Feature != InlineCostFeatureIndex::is_multiple_blocks &&
         Feature != InlineCostFeatureIndex::dead_blocks &&
         Feature != InlineCostFeatureIndex::simplified_instructions &&
         Feature != InlineCostFeatureIndex::constant_args &&
         Feature != InlineCostFeatureIndex::constant_offset_ptr_args &&
         Feature != InlineCostFeatureIndex::nested_inlines &&
         Feature != InlineCostFeatureIndex::nested_inline_cost_estimate &&
         Feature != InlineCostFeatureIndex::threshold;
}

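// Illustrative aside (not part of the upstream header): the point of the
// X-macro above is that consumers can derive other per-feature tables by
// supplying their own M. The hypothetical INLINE_COST_FEATURE_NAME macro and
// InlineCostFeatureNames table below sketch that pattern, producing a name
// table that is checked against the enum's NumberOfFeatures.
#define INLINE_COST_FEATURE_NAME(DTYPE, SHAPE, NAME, DOC) #NAME,
inline constexpr const char *InlineCostFeatureNames[] = {
    INLINE_COST_FEATURE_ITERATOR(INLINE_COST_FEATURE_NAME)};
#undef INLINE_COST_FEATURE_NAME
static_assert(sizeof(InlineCostFeatureNames) / sizeof(InlineCostFeatureNames[0]) ==
                  static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures),
              "name table must stay in sync with InlineCostFeatureIndex");
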
// List of features. Each feature is defined through a tuple:
// - the tensor element type and shape,
// - the name of an enum member, which will be the feature index; its
// stringified form is also the textual name used for ML model binding (so it
// needs to match the name expected by the ML model), and
// - a documentation description. Currently, that is not used anywhere
// programmatically, and serves as a workaround for the inability to insert
// comments in macros.
#define INLINE_FEATURE_ITERATOR(M) \
  M(int64_t, {1}, callee_basic_block_count, \
    "number of basic blocks of the callee") \
  M(int64_t, {1}, callsite_height, \
    "position of the call site in the original call graph - measured from " \
    "the farthest SCC") \
  M(int64_t, {1}, node_count, \
    "total current number of defined functions in the module") \
  M(int64_t, {1}, nr_ctant_params, \
    "number of parameters in the call site that are constants") \
  M(int64_t, {1}, cost_estimate, "total cost estimate (threshold - free)") \
  M(int64_t, {1}, edge_count, "total number of calls in the module") \
  M(int64_t, {1}, caller_users, \
    "number of module-internal users of the caller, +1 if the caller is " \
    "exposed externally") \
  M(int64_t, {1}, caller_conditionally_executed_blocks, \
    "number of blocks reached from a conditional instruction, in the caller") \
  M(int64_t, {1}, caller_basic_block_count, \
    "number of basic blocks in the caller") \
  M(int64_t, {1}, callee_conditionally_executed_blocks, \
    "number of blocks reached from a conditional instruction, in the callee") \
  M(int64_t, {1}, callee_users, \
    "number of module-internal users of the callee, +1 if the callee is " \
    "exposed externally")

// clang-format off
enum class FeatureIndex : size_t {
#define POPULATE_INDICES(DTYPE, SHAPE, NAME, COMMENT) NAME,
// InlineCost features - these must come first
  INLINE_COST_FEATURE_ITERATOR(POPULATE_INDICES)

// Non-cost features
  INLINE_FEATURE_ITERATOR(POPULATE_INDICES)
#undef POPULATE_INDICES

  NumberOfFeatures
};
// clang-format on

constexpr FeatureIndex
inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) {
  return static_cast<FeatureIndex>(static_cast<size_t>(Feature));
}

constexpr size_t NumberOfFeatures =
    static_cast<size_t>(FeatureIndex::NumberOfFeatures);

extern const std::vector<TensorSpec> FeatureMap;

extern const char *const DecisionName;
extern const TensorSpec InlineDecisionSpec;
extern const char *const DefaultDecisionName;
extern const TensorSpec DefaultDecisionSpec;
extern const char *const RewardName;

using InlineFeatures = std::vector<int64_t>;

} // namespace llvm
#endif // LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
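FeatureMap, DecisionName, and the other externs above are only declarations; their definitions live in the ML inline advisor implementation. As a rough, hedged sketch (not the verbatim upstream code), a defining translation unit could expand the same X-macros once more to build the TensorSpec list, keeping the cost features first so that inlineCostFeatureToMlFeature remains a plain index cast. The POPULATE_SPECS macro and copyCostFeatures helper below are hypothetical names introduced for illustration.

// Sketch of a defining translation unit (illustrative only).
#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/Analysis/TensorSpec.h"

using namespace llvm;

// Expand each feature entry into a TensorSpec: the stringified NAME is the
// textual name the ML model binds against, SHAPE is the scalar {1} shape.
#define POPULATE_SPECS(DTYPE, SHAPE, NAME, DOC) \
  TensorSpec::createSpec<DTYPE>(#NAME, SHAPE),

const std::vector<TensorSpec> llvm::FeatureMap{
    // Cost features first: FeatureIndex reuses their indices, which is what
    // makes inlineCostFeatureToMlFeature a simple cast.
    INLINE_COST_FEATURE_ITERATOR(POPULATE_SPECS)
    INLINE_FEATURE_ITERATOR(POPULATE_SPECS)};
#undef POPULATE_SPECS

// Example use of the index mapping: copy a computed InlineCostFeatures array
// into a flat InlineFeatures buffer laid out per FeatureIndex.
static void copyCostFeatures(const InlineCostFeatures &Src,
                             InlineFeatures &Dst) {
  Dst.resize(NumberOfFeatures);
  for (size_t I = 0; I < Src.size(); ++I)
    Dst[static_cast<size_t>(inlineCostFeatureToMlFeature(
        static_cast<InlineCostFeatureIndex>(I)))] =
        static_cast<int64_t>(Src[I]);
}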