LLVM 23.0.0git
X86TargetTransformInfo.h
//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfoImplBase conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include <optional>

namespace llvm {

class InstCombiner;

class X86TTIImpl final : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::FeatureX86_64,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCX16,
      X86::FeatureLAHFSAHF64,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,
      X86::TuningFastDPWSSD,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,

      // Based on whether the user set the -mprefer-vector-width command line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };
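  // Note: X86TTIImpl::areInlineCompatible() masks these bits out of both the
  // caller's and the callee's feature sets before comparing them, so a
  // difference in any feature listed here does not by itself block inlining.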

public:
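  // An X86TTIImpl is normally constructed per function by
  // X86TargetMachine::getTargetTransformInfo(), so ST and TLI reflect that
  // function's subtarget (its target-cpu / target-features attributes).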
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  std::optional<unsigned> getCacheSize(
      TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const override;
  unsigned getRegisterClassForType(bool Vector, Type *Ty) const override;
  bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const override;
  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const override;
  unsigned getMaxInterleaveFactor(ElementCount VF) const override;
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
                 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                 const Instruction *CxtI = nullptr) const override;
  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                   const Instruction *I = nullptr) const override;
  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;
  InstructionCost
  getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
                     unsigned Index, const Value *Op0, const Value *Op1,
                     TTI::VectorInstrContext VIC =
                         TTI::VectorInstrContext::None) const override;
  InstructionCost
  getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
                           bool Insert, bool Extract,
                           TTI::TargetCostKind CostKind,
                           bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
                           TTI::VectorInstrContext VIC =
                               TTI::VectorInstrContext::None) const override;
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) const override;
  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;
  InstructionCost
  getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
                           TTI::TargetCostKind CostKind) const override;
  InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                                        TTI::TargetCostKind CostKind) const;
  InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA,
                                         TTI::TargetCostKind CostKind) const;
  InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const TTI::PointersChainInfo &Info, Type *AccessTy,
                       TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
                            TTI::TargetCostKind CostKind) const override;

  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const override;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override;

  unsigned getAtomicMemIntrinsicMaxElementSize() const override;

  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;

  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF) const;

  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false) const;

  InstructionCost getIntImmCost(int64_t) const;

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind) const override;

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const override;
  InstructionCost
  getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                      Type *Ty, TTI::TargetCostKind CostKind) const override;
  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns an invalid cost.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const override;
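  // For reference: an x86 memory operand has the general form
  //   [BaseReg + Scale * IndexReg + Disp + BaseGV]
  // so a nonzero Scale implies that the addressing mode needs an index
  // register.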

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const override;
  bool canMacroFuseCmp() const override;
  bool
  isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace,
                    TTI::MaskKind MaskKind =
                        TTI::MaskKind::VariableOrConstantMask) const override;
  bool
  isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace,
                     TTI::MaskKind MaskKind =
                         TTI::MaskKind::VariableOrConstantMask) const override;
  bool isLegalNTLoad(Type *DataType, Align Alignment) const override;
  bool isLegalNTStore(Type *DataType, Align Alignment) const override;
  bool isLegalBroadcastLoad(Type *ElementTy,
                            ElementCount NumElements) const override;
  bool forceScalarizeMaskedGather(VectorType *VTy,
                                  Align Alignment) const override;
  bool forceScalarizeMaskedScatter(VectorType *VTy,
                                   Align Alignment) const override {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const;
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const override;
  bool isLegalMaskedCompressStore(Type *DataType,
                                  Align Alignment) const override;
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const override;
  bool hasDivRemOp(Type *DataType, bool IsSigned) const override;
  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const override;
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override;
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             ArrayRef<Type *> Type) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return ST->getMaxInlineSizeThreshold();
  }

  TTI::MemCmpExpansionOptions
  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override;
  bool preferAlternateOpcodeVectorization() const override { return false; }
  bool prefersVectorizedAddressing() const override;
  bool supportsEfficientVectorElementLoadStore() const override;
  bool enableInterleavedAccessVectorization() const override;

  InstructionCost getBranchMispredictPenalty() const override;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;

  bool isVectorShiftByScalarCheap(Type *Ty) const override;

  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override;

  bool useFastCCForInternalCall(Function &F) const override;

private:
  bool supportsGather() const;
  InstructionCost getGSVectorCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  Type *DataTy, const Value *Ptr,
                                  Align Alignment, unsigned AddressSpace) const;

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif
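Passes never construct X86TTIImpl directly; they request a TargetTransformInfo for the current function (for example via TargetIRAnalysis), and when the module targets X86 the generic cost queries are dispatched to the overrides declared above. A minimal sketch of such a query, assuming a pass that already holds a TargetTransformInfo reference (the helper name and context are illustrative, not part of LLVM):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Illustrative helper: ask the cost model for the reciprocal-throughput cost
// of an add on the given type. On an X86 target this call is answered by
// X86TTIImpl::getArithmeticInstrCost().
static InstructionCost addCost(const TargetTransformInfo &TTI, Type *Ty) {
  return TTI.getArithmeticInstrCost(Instruction::Add, Ty,
                                    TargetTransformInfo::TCK_RecipThroughput);
}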