AMDGPUTargetTransformInfo.h
//===- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file declares a TargetTransformInfoImplBase-conforming object specific
/// to the AMDGPU target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H

#include "AMDGPU.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include <optional>

namespace llvm {

class AMDGPUTargetMachine;
class GCNSubtarget;
class InstCombiner;
class Loop;
class ScalarEvolution;
class SITargetLowering;
class Type;
class Value;

class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
  using BaseT = BasicTTIImplBase<AMDGPUTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  Triple TargetTriple;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const override;

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override;
};

class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
  using BaseT = BasicTTIImplBase<GCNTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const GCNSubtarget *ST;
  const SITargetLowering *TLI;
  AMDGPUTTIImpl CommonTTI;
  bool IsGraphics;
  bool HasFP32Denormals;
  bool HasFP64FP16Denormals;
  static constexpr bool InlinerVectorBonusPercent = 0;

  static const FeatureBitset InlineFeatureIgnoreList;

  const GCNSubtarget *getST() const { return ST; }
  const SITargetLowering *getTLI() const { return TLI; }

  static inline int getFullRateInstrCost() {
    return TargetTransformInfo::TCC_Basic;
  }

  static inline int getHalfRateInstrCost(TTI::TargetCostKind CostKind) {
    return CostKind == TTI::TCK_CodeSize ? 2
                                         : 2 * TargetTransformInfo::TCC_Basic;
  }

  // TODO: The size is usually 8 bytes, but takes 4x as many cycles. Maybe
  // should be 2 or 4.
  static inline int getQuarterRateInstrCost(TTI::TargetCostKind CostKind) {
    return CostKind == TTI::TCK_CodeSize ? 2
                                         : 4 * TargetTransformInfo::TCC_Basic;
  }

  // On some parts, normal fp64 operations are half rate, and on others they
  // are quarter rate. This also applies to some integer operations.
  int get64BitInstrCost(TTI::TargetCostKind CostKind) const;
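  // Illustrative sketch only (not part of the upstream header): the
  // implementation is expected to pick between the rate helpers above based
  // on the subtarget, along the lines of
  //
  //   return ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
  //                                 : getQuarterRateInstrCost(CostKind);
  //
  // where hasHalfRate64Ops() is assumed to be the relevant GCNSubtarget query.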

  std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const;

  /// \returns true if V might be divergent even when all of its operands
  /// are uniform.
  bool isSourceOfDivergence(const Value *V) const;

  /// Returns true for the target-specific set of operations which produce a
  /// uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

public:
  explicit GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F);

  bool hasBranchDivergence(const Function *F = nullptr) const override;

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const override;

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) const override;

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override {
    assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
    return TTI::PSK_FastHardware;
  }

  unsigned getNumberOfRegisters(unsigned RCID) const override;
  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const override;
  unsigned getMinVectorRegisterBitWidth() const override;
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override;
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override;
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override;
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override;

  bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes, Align Alignment,
                                  unsigned AddrSpace) const;
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const override;
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override;
  Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
      std::optional<uint32_t> AtomicElementSize) const override;

  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      Align SrcAlign, Align DestAlign,
      std::optional<uint32_t> AtomicCpySize) const override;
  unsigned getMaxInterleaveFactor(ElementCount VF) const override;

  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) const override;

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  bool isInlineAsmSourceOfDivergence(const CallInst *CI,
                                     ArrayRef<unsigned> Indices = {}) const;

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override;

  bool isReadRegisterSourceOfDivergence(const IntrinsicInst *ReadReg) const;

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
    // Address space casts must cast between different address spaces.
    if (FromAS == ToAS)
      return false;

    // Casts between any aliasing address spaces are valid.
    return AMDGPU::addrspacesMayAlias(FromAS, ToAS);
  }
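  // For example (illustrative note, not part of the upstream header): a cast
  // between AMDGPUAS::FLAT_ADDRESS and AMDGPUAS::LOCAL_ADDRESS is valid
  // because flat pointers may alias local memory, while a cast between
  // AMDGPUAS::LOCAL_ADDRESS and AMDGPUAS::PRIVATE_ADDRESS is rejected since
  // those address spaces never alias; the authoritative table is
  // AMDGPU::addrspacesMayAlias.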

  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
    return AMDGPU::addrspacesMayAlias(AS0, AS1);
  }

  unsigned getFlatAddressSpace() const override {
    // Don't bother running InferAddressSpaces pass on graphics shaders which
    // don't use flat addressing.
    if (IsGraphics)
      return -1;
    return AMDGPUAS::FLAT_ADDRESS;
  }
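  // Note (illustrative, not from the upstream header): returning -1 here
  // signals that no flat address space is available, so InferAddressSpaces
  // has nothing to rewrite toward and skips such functions.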

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const override;

  bool
  canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const override {
    return AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS &&
           AS != AMDGPUAS::PRIVATE_ADDRESS;
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const override;

  bool canSimplifyLegacyMulToMul(const Instruction &I, const Value *Op0,
                                 const Value *Op1, InstCombiner &IC) const;

  bool simplifyDemandedLaneMaskArg(InstCombiner &IC, IntrinsicInst &II,
                                   unsigned LaneAgIdx) const;

  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;

  Value *simplifyAMDGCNLaneIntrinsicDemanded(InstCombiner &IC,
                                             IntrinsicInst &II,
                                             const APInt &DemandedElts,
                                             APInt &UndefElts) const;

  Instruction *hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
                                                IntrinsicInst &II) const;

  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override;

  InstructionCost getVectorSplitCost() const { return 0; }

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy,
                                 VectorType *SrcTy, ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                                 const Instruction *CxtI = nullptr) const override;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override;

  int getInliningLastCallToStaticBonus() const override;
  unsigned getInliningThresholdMultiplier() const override { return 11; }
  unsigned adjustInliningThreshold(const CallBase *CB) const override;
  unsigned getCallerAllocaCost(const CallBase *CB,
                               const AllocaInst *AI) const override;

  int getInlinerVectorBonusPercent() const override {
    return InlinerVectorBonusPercent;
  }

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;
  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind) const override;

  /// Data cache line size for LoopDataPrefetch pass. Has no use before GFX12.
  unsigned getCacheLineSize() const override { return 128; }

  /// How far ahead of a load we should place the prefetch instruction.
  /// This is currently measured in number of IR instructions.
  unsigned getPrefetchDistance() const override;

  /// \return true if the target wants to issue a prefetch in address space
  /// \p AS.
  bool shouldPrefetchAddressSpace(unsigned AS) const override;
  void collectKernelLaunchBounds(
      const Function &F,
      SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const override;

  enum class KnownIEEEMode { Unknown, On, Off };

  /// Return KnownIEEEMode::On if we know the use context can assume
  /// "amdgpu-ieee"="true" and KnownIEEEMode::Off if we can assume
  /// "amdgpu-ieee"="false".
  KnownIEEEMode fpenvIEEEMode(const Instruction &I) const;

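  // Illustrative example (not from the upstream header): for an instruction
  // in a function annotated with "amdgpu-ieee"="false", fpenvIEEEMode is
  // expected to report KnownIEEEMode::Off, enabling simplifications that are
  // only legal when IEEE NaN/denormal handling is disabled; with
  // "amdgpu-ieee"="true" it reports KnownIEEEMode::On.
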
  /// Account for loads of i8 vector types having reduced cost. For example,
  /// the cost of loading 4 i8 values is the cost of loading a single i32
  /// value.
  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  /// When counting parts on AMD GPUs, account for i8s being grouped
  /// together under a single i32 value. Otherwise fall back to the base
  /// implementation.
  unsigned getNumberOfParts(Type *Tp) const override;
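  // For instance (illustrative, assuming the i8 grouping described above):
  // getNumberOfParts would count an <8 x i8> vector as roughly two parts,
  // matching the two i32 registers it occupies, rather than eight.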

  InstructionUniformity getInstructionUniformity(const Value *V) const override;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H