HexagonTargetTransformInfo.cpp
//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EnableV68FloatAutoHVX(
    "force-hvx-float", cl::Hidden,
    cl::desc("Enable auto-vectorization of floating point types on v68."));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
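
// useHVX() gates all of the HVX-specific costing below: HVX instructions must
// be available on the subtarget and HVX auto-vectorization must be requested
// explicitly via -hexagon-autohvx.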
bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  if (!VecTy)
    return false;
  if (!ST.isTypeForHVX(VecTy))
    return false;
  if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
    return true;
  return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength()*8 : 32;
}

ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}

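// The scalarization-overhead and call-cost queries below apply no
// Hexagon-specific adjustment; they simply defer to the default
// (BasicTTIImpl) implementations.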
InstructionCost HexagonTTIImpl::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
    TTI::TargetCostKind CostKind) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                         CostKind);
}

InstructionCost HexagonTTIImpl::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
    TTI::TargetCostKind CostKind) {
  return BaseT::getOperandsScalarizationOverhead(Args, Tys, CostKind);
}

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        getTypeLegalizationCost(ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                           ScalarEvolution *SE,
                                                           const SCEV *S) {
  return 0;
}

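// Vector loads are costed in units of register-sized transfers: an HVX load
// whose width is a multiple of the HVX register width costs one unit per
// register covered; other vector loads are charged for the number of
// alignment-sized pieces, with extra weight for under-aligned accesses and
// (for non-HVX vectors) for floating-point element types via FloatFactor.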
InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}

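// Masked and gather/scatter costs defer to the default implementation;
// shuffles are given a flat unit cost, and interleaved accesses that address
// every member of the group and use no masking are costed as a plain vector
// memory operation.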
InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                               ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind,
                                               int Index, Type *SubTp,
                                               ArrayRef<const Value *> Args) {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

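// Vector floating-point compares are penalized by FloatFactor per element;
// FP vector types that cannot be carried in HVX registers are effectively
// rejected by returning the maximum representable cost.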
InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  if (Ty->isVectorTy()) {
    if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  auto isNonHVXFP = [this] (Type *Ty) {
    return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
  };
  if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
    return InstructionCost::getMax();

  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

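// Element insert/extract costs: inserting into a non-zero lane is modeled as
// two rotations, and any element type other than i32 additionally pays the
// cost of an extract.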
InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index, Value *Op0,
                                                   Value *Op1) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
                                     Index, Op0, Op1);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

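// A sign- or zero-extending cast of a sub-32-bit integer load to i32 is free:
// Hexagon's extending loads (e.g. memub/memh) perform the widening as part of
// the memory access.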
InstructionCost
HexagonTTIImpl::getInstructionCost(const User *U,
                                   ArrayRef<const Value *> Operands,
                                   TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getInstructionCost(U, Operands, CostKind);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}