LLVM 17.0.0git
AMDGPUTargetTransformInfo.h
//===- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file declares a TargetTransformInfo::Concept conforming object
/// specific to the AMDGPU target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target-independent and default TTI implementations handle the
/// rest.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H

#include "AMDGPU.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include <optional>

namespace llvm {

class AMDGPUTargetMachine;
class GCNSubtarget;
class InstCombiner;
class Loop;
class ScalarEvolution;
class SITargetLowering;
class Type;
class Value;

class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
  using BaseT = BasicTTIImplBase<AMDGPUTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  Triple TargetTriple;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);
};

class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
  using BaseT = BasicTTIImplBase<GCNTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const GCNSubtarget *ST;
  const SITargetLowering *TLI;
  AMDGPUTTIImpl CommonTTI;
  bool IsGraphics;
  bool HasFP32Denormals;
  bool HasFP64FP16Denormals;

  static const FeatureBitset InlineFeatureIgnoreList;

  const GCNSubtarget *getST() const { return ST; }
  const SITargetLowering *getTLI() const { return TLI; }

  static inline int getFullRateInstrCost() {
    return TargetTransformInfo::TCC_Basic;
  }

  static inline int getHalfRateInstrCost(TTI::TargetCostKind CostKind) {
    return CostKind == TTI::TCK_CodeSize ? 2
                                         : 2 * TargetTransformInfo::TCC_Basic;
  }

  // TODO: The size is usually 8 bytes, but takes 4x as many cycles. Maybe
  // should be 2 or 4.
  static inline int getQuarterRateInstrCost(TTI::TargetCostKind CostKind) {
    return CostKind == TTI::TCK_CodeSize ? 2
                                         : 4 * TargetTransformInfo::TCC_Basic;
  }

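  // Worked example of the helpers above (explanatory comment, not in the
  // original header): under the default TCK_RecipThroughput cost kind, a
  // full-rate instruction costs TCC_Basic (1), a half-rate one
  // 2 * TCC_Basic, and a quarter-rate one 4 * TCC_Basic, i.e. the modeled
  // cost scales with the reciprocal of the issue rate. For TCK_CodeSize the
  // half- and quarter-rate helpers both return 2, since encoding size rather
  // than throughput is what matters there.
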
  // On some parts, normal fp64 operations are half rate, and on others
  // quarter rate. This also applies to some integer operations.
  int get64BitInstrCost(TTI::TargetCostKind CostKind) const;

  std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const;

public:
  explicit GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F);

  bool hasBranchDivergence() { return true; }
  bool useGPUDivergenceAnalysis() const;

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
    assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
    return TTI::PSK_FastHardware;
  }

  unsigned getNumberOfRegisters(unsigned RCID) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
  unsigned getMinVectorRegisterBitWidth() const;
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes, Align Alignment,
                                  unsigned AddrSpace) const;
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const;
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const;
  Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicElementSize) const;

  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicCpySize) const;

  unsigned getMaxInterleaveFactor(ElementCount VF);

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  bool isInlineAsmSourceOfDivergence(const CallInst *CI,
                                     ArrayRef<unsigned> Indices = {}) const;

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);

  bool isReadRegisterSourceOfDivergence(const IntrinsicInst *ReadReg) const;
  bool isSourceOfDivergence(const Value *V) const;
  bool isAlwaysUniform(const Value *V) const;

  unsigned getFlatAddressSpace() const {
    // Don't bother running the InferAddressSpaces pass on graphics shaders,
    // which don't use flat addressing.
    if (IsGraphics)
      return -1;
    return AMDGPUAS::FLAT_ADDRESS;
  }

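  // Illustrative note (an assumption based on how InferAddressSpaces consumes
  // this hook, not a comment from the original header): a return value of -1
  // reports that no usable flat address space exists, so InferAddressSpaces
  // bails out early instead of attempting to rewrite pointers.
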
  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const {
    return AS != AMDGPUAS::FLAT_ADDRESS && AS != AMDGPUAS::PRIVATE_ADDRESS &&
           AS != AMDGPUAS::REGION_ADDRESS;
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;

  bool canSimplifyLegacyMulToMul(const Value *Op0, const Value *Op1,
                                 InstCombiner &IC) const;
  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  InstructionCost getVectorSplitCost() { return 0; }

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = std::nullopt);

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  unsigned getInliningThresholdMultiplier() { return 11; }
  unsigned adjustInliningThreshold(const CallBase *CB) const;

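  // Worked example (explanatory comment, not in the original header; assumes
  // LLVM's default -inline-threshold of 225): the multiplier above scales the
  // base inline threshold to roughly 11 * 225 = 2475, biasing the inliner
  // heavily toward inlining, which tends to pay off given how expensive calls
  // and divergent control flow are on GPUs.
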
  int getInlinerVectorBonusPercent() { return 0; }

  InstructionCost getArithmeticReductionCost(
      unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
      TTI::TargetCostKind CostKind);

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);
  InstructionCost getMinMaxReductionCost(
      VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
      TTI::TargetCostKind CostKind);
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H
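
A minimal usage sketch (not part of the header above): optimization passes do not instantiate GCNTTIImpl directly; they reach these hooks through the generic TargetTransformInfo wrapper produced by TargetIRAnalysis. The helper name below is hypothetical; the LLVM APIs used (FunctionAnalysisManager, TargetIRAnalysis, TargetTransformInfo::getFlatAddressSpace) are real.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Hypothetical helper: ask the target whether a flat address space exists.
// For AMDGPU compute functions this dispatches to
// GCNTTIImpl::getFlatAddressSpace(), which returns AMDGPUAS::FLAT_ADDRESS;
// for graphics shaders it returns -1 and this helper yields false.
static bool hasUsableFlatAddressSpace(Function &F,
                                      FunctionAnalysisManager &FAM) {
  const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  return TTI.getFlatAddressSpace() != unsigned(-1);
}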