//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

35 "hexagon-allow-scatter-gather-hvx", cl::init(false), cl::Hidden,
36 cl::desc("Allow auto-generation of HVX scatter-gather"));
37
39 "force-hvx-float", cl::Hidden,
40 cl::desc("Enable auto-vectorization of floatint point types on v68."));
41
static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
    cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
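// Illustrative (added note): with FloatFactor = 4, the fcmp cost computed in
// getCmpSelInstrCost below for an HVX <32 x float> becomes LT.first + 4 * 32;
// the same factor scales FP memory, cast, and arithmetic costs throughout
// this file.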

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

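// Note (added): a type qualifies for HVX auto-vectorization when the
// subtarget recognizes it as an HVX type; floating-point element types
// additionally require HVX v69, or v68 combined with -force-hvx-float.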
bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  if (!VecTy)
    return false;
  if (!ST.isTypeForHVX(VecTy))
    return false;
  if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
    return true;
  return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) const {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) const {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}
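// E.g. a loop with a data-dependent early exit often has no exact trip count
// (getSmallConstantTripCount returns 0) but a small constant upper bound;
// peeling two iterations lets such loops simplify or disappear entirely.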

TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  // Bias LSR towards creating post-increment opportunities.
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = ClassID == 1;
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}
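// E.g. in the 128-byte HVX configuration (ST.getVectorLength() == 128), i16
// elements give a minimum VF of (8 * 128) / 16 = 64 lanes.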

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) const {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) const {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        getTypeLegalizationCost(ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost
HexagonTTIImpl::getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                          const SCEV *S,
                                          TTI::TargetCostKind CostKind) const {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                Align Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) const {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (Alignment > RegAlign)
        Alignment = RegAlign;
      unsigned AlignWidth = 8 * Alignment.value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment, Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}
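// Worked example (illustrative): a <48 x i16> load (VecWidth = 768) against
// 1024-bit HVX registers is not a whole number of registers, so with Align(4)
// the composing-load path above gives AlignWidth = 32,
// NumLoads = alignTo(768, 32) / 32 = 24, and a reported cost of 3 * 24 = 72.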

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                                      TTI::TargetCostKind CostKind) const {
  return 1;
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *DstTy,
                                               VectorType *SrcTy, ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind, int Index,
                                               VectorType *SubTp,
                                               ArrayRef<const Value *> Args,
                                               const Instruction *CxtI) const {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
}
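// Note (added): when every field of the interleaved group is requested and no
// masking is involved, the group is costed as one wide access of VecTy via
// getMemoryOpCost above.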

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                   Op1Info, Op2Info, I);
}
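// Note (added): non-HVX FP vector compares are priced at
// InstructionCost::getMax() to rule them out, while legal vector fcmps pay
// the FloatFactor penalty per element on top of legalization.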

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  if (Ty->isVectorTy()) {
    if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) const {
  auto isNonHVXFP = [this](Type *Ty) {
    return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
  };
  if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
    return InstructionCost::getMax();

  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}
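// E.g. a scalar sitofp i32 -> float has SrcN = 0 and DstN = 1, so its
// throughput cost is max(SrcLT.first, DstLT.first) + FloatFactor * 1.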

InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index,
                                                   const Value *Op0,
                                                   const Value *Op1) const {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
                                     Index, Op0, Op1);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}
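// E.g. inserting an i16 element at a non-zero index costs 2 (rotations) plus
// 2 for the companion extract, i.e. 4 in total; a 32-bit insert at index 0
// is free.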

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/,
                                        unsigned /*AddressSpace*/,
                                        TTI::MaskKind /*MaskKind*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/,
                                       unsigned /*AddressSpace*/,
                                       TTI::MaskKind /*MaskKind*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}
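// Note (added): alignment, address space, and mask kind are deliberately
// ignored here; the only gates are the -hexagon-masked-vmem flag and whether
// the type maps onto HVX registers.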

bool HexagonTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) const {
  // For now assume we cannot deal with all HVX data types.
  if (!Ty->isVectorTy() || !ST.isTypeForHVX(Ty) ||
      !HexagonAllowScatterGatherHVX)
    return false;
  // This must be in sync with HexagonVectorCombine pass.
  switch (Ty->getScalarSizeInBits()) {
  case 8:
    return (getTypeNumElements(Ty) == 128);
  case 16:
    if (getTypeNumElements(Ty) == 64 || getTypeNumElements(Ty) == 32)
      return (Alignment >= 2);
    break;
  case 32:
    if (getTypeNumElements(Ty) == 32)
      return (Alignment >= 4);
    break;
  default:
    break;
  }
  return false;
}

bool HexagonTTIImpl::isLegalMaskedScatter(Type *Ty, Align Alignment) const {
  if (!Ty->isVectorTy() || !ST.isTypeForHVX(Ty) ||
      !HexagonAllowScatterGatherHVX)
    return false;
  // This must be in sync with HexagonVectorCombine pass.
  switch (Ty->getScalarSizeInBits()) {
  case 8:
    return (getTypeNumElements(Ty) == 128);
  case 16:
    if (getTypeNumElements(Ty) == 64)
      return (Alignment >= 2);
    break;
  case 32:
    if (getTypeNumElements(Ty) == 32)
      return (Alignment >= 4);
    break;
  default:
    break;
  }
  return false;
}
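// Note (added) the asymmetry with gathers above: 32-element i16 vectors may
// be gathered but not scattered.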

bool HexagonTTIImpl::forceScalarizeMaskedGather(VectorType *VTy,
                                                Align Alignment) const {
  return !isLegalMaskedGather(VTy, Alignment);
}

bool HexagonTTIImpl::forceScalarizeMaskedScatter(VectorType *VTy,
                                                 Align Alignment) const {
  return !isLegalMaskedScatter(VTy, Alignment);
}
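// Note (added): any gather/scatter that fails the legality checks above is
// forced to scalarize instead of going through a generic vector expansion.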

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

InstructionCost
HexagonTTIImpl::getInstructionCost(const User *U,
                                   ArrayRef<const Value *> Operands,
                                   TTI::TargetCostKind CostKind) const {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TTI::TCC_Free;
  return BaseT::getInstructionCost(U, Operands, CostKind);
}
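// E.g. a sext of an i16 load with a single use is reported as TTI::TCC_Free
// above, since Hexagon loads can sign- or zero-extend to 32 bits for free.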