LLVM 19.0.0git
RISCVTargetTransformInfo.h
Go to the documentation of this file.
1//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file defines a TargetTransformInfo::Concept conforming object specific
10/// to the RISC-V target machine. It uses the target's detailed information to
11/// provide more precise answers to certain TTI queries, while letting the
12/// target independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
17#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
18
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>
26
27namespace llvm {
28
29class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
32
33 friend BaseT;
34
35 const RISCVSubtarget *ST;
36 const RISCVTargetLowering *TLI;
37
38 const RISCVSubtarget *getST() const { return ST; }
39 const RISCVTargetLowering *getTLI() const { return TLI; }
40
41 /// This function returns an estimate for VL to be used in VL based terms
42 /// of the cost model. For fixed length vectors, this is simply the
43 /// vector length. For scalable vectors, we return results consistent
44 /// with getVScaleForTuning under the assumption that clients are also
45 /// using that when comparing costs between scalar and vector representation.
46 /// This does unfortunately mean that we can both undershoot and overshot
47 /// the true cost significantly if getVScaleForTuning is wildly off for the
48 /// actual target hardware.
49 unsigned getEstimatedVLFor(VectorType *Ty);
50
51 InstructionCost getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
53
54 /// Return the cost of accessing a constant pool entry of the specified
55 /// type.
56 InstructionCost getConstantPoolLoadCost(Type *Ty,
58public:
59 explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
60 : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
61 TLI(ST->getTargetLowering()) {}
62
63 bool areInlineCompatible(const Function *Caller,
64 const Function *Callee) const;
65
66 /// Return the cost of materializing an immediate for a value operand of
67 /// a store instruction.
70
73 InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
74 const APInt &Imm, Type *Ty,
76 Instruction *Inst = nullptr);
78 const APInt &Imm, Type *Ty,
80
82
83 bool shouldExpandReduction(const IntrinsicInst *II) const;
84 bool supportsScalableVectors() const { return ST->hasVInstructions(); }
85 bool enableOrderedReductions() const { return true; }
86 bool enableScalableVectorization() const { return ST->hasVInstructions(); }
88 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
89 return ST->hasVInstructions() ? TailFoldingStyle::Data
91 }
92 std::optional<unsigned> getMaxVScale() const;
93 std::optional<unsigned> getVScaleForTuning() const;
94
96
97 unsigned getRegUsageForType(Type *Ty);
98
99 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
100
102 // Epilogue vectorization is usually unprofitable - tail folding or
103 // a smaller VF would have been better. This a blunt hammer - we
104 // should re-examine this once vectorization is better tuned.
105 return false;
106 }
107
108 InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
109 Align Alignment, unsigned AddressSpace,
111
113 const Value *Base,
115 Type *AccessTy,
117
121
124
126 return ST->useRVVForFixedLengthVectors() ? 16 : 0;
127 }
128
130 ArrayRef<int> Mask,
132 VectorType *SubTp,
133 ArrayRef<const Value *> Args = std::nullopt);
134
137
139 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
140 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
141 bool UseMaskForCond = false, bool UseMaskForGaps = false);
142
143 InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
144 const Value *Ptr, bool VariableMask,
145 Align Alignment,
147 const Instruction *I);
148
149 InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
150 const Value *Ptr, bool VariableMask,
151 Align Alignment,
153 const Instruction *I);
154
155 InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
158 const Instruction *I = nullptr);
159
161 FastMathFlags FMF,
163
165 std::optional<FastMathFlags> FMF,
167
168 InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
169 Type *ResTy, VectorType *ValTy,
170 FastMathFlags FMF,
172
174 getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
177 const Instruction *I = nullptr);
178
179 InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
180 CmpInst::Predicate VecPred,
182 const Instruction *I = nullptr);
183
184 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
185 const Instruction *I = nullptr);
186
188 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
190 unsigned Index, Value *Op0, Value *Op1);
191
192 InstructionCost getArithmeticInstrCost(
193 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
194 TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
195 TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
196 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
197 const Instruction *CxtI = nullptr);
198
200 return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty));
201 }
202
203 bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
204 if (!ST->hasVInstructions())
205 return false;
206
207 EVT DataTypeVT = TLI->getValueType(DL, DataType);
208
209 // Only support fixed vectors if we know the minimum vector size.
210 if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
211 return false;
212
213 EVT ElemType = DataTypeVT.getScalarType();
214 if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
215 return false;
216
217 return TLI->isLegalElementTypeForRVV(ElemType);
218
219 }
220
221 bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
222 return isLegalMaskedLoadStore(DataType, Alignment);
223 }
224 bool isLegalMaskedStore(Type *DataType, Align Alignment) {
225 return isLegalMaskedLoadStore(DataType, Alignment);
226 }
227
228 bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) {
229 if (!ST->hasVInstructions())
230 return false;
231
232 EVT DataTypeVT = TLI->getValueType(DL, DataType);
233
234 // Only support fixed vectors if we know the minimum vector size.
235 if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
236 return false;
237
238 EVT ElemType = DataTypeVT.getScalarType();
239 if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
240 return false;
241
242 return TLI->isLegalElementTypeForRVV(ElemType);
243 }
244
245 bool isLegalMaskedGather(Type *DataType, Align Alignment) {
246 return isLegalMaskedGatherScatter(DataType, Alignment);
247 }
248 bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
249 return isLegalMaskedGatherScatter(DataType, Alignment);
250 }
251
253 // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
254 return ST->is64Bit() && !ST->hasVInstructionsI64();
255 }
256
258 // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
259 return ST->is64Bit() && !ST->hasVInstructionsI64();
260 }
261
262 bool isLegalStridedLoadStore(Type *DataType, Align Alignment) {
263 EVT DataTypeVT = TLI->getValueType(DL, DataType);
264 return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
265 }
266
267 bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);
268
270 return TLI->isVScaleKnownToBeAPowerOfTwo();
271 }
272
273 /// \returns How the target needs this vector-predicated operation to be
274 /// transformed.
278 if (!ST->hasVInstructions() ||
279 (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
280 cast<VectorType>(PI.getArgOperand(1)->getType())
281 ->getElementType()
282 ->getIntegerBitWidth() != 1))
285 }
286
288 ElementCount VF) const {
289 if (!VF.isScalable())
290 return true;
291
292 Type *Ty = RdxDesc.getRecurrenceType();
293 if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)))
294 return false;
295
296 switch (RdxDesc.getRecurrenceKind()) {
297 case RecurKind::Add:
298 case RecurKind::FAdd:
299 case RecurKind::And:
300 case RecurKind::Or:
301 case RecurKind::Xor:
302 case RecurKind::SMin:
303 case RecurKind::SMax:
304 case RecurKind::UMin:
305 case RecurKind::UMax:
306 case RecurKind::FMin:
307 case RecurKind::FMax:
311 return true;
312 default:
313 return false;
314 }
315 }
316
318 // Don't interleave if the loop has been vectorized with scalable vectors.
319 if (VF.isScalable())
320 return 1;
321 // If the loop will not be vectorized, don't interleave the loop.
322 // Let regular unroll to unroll the loop.
323 return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
324 }
325
327
329 unsigned getNumberOfRegisters(unsigned ClassID) const {
330 switch (ClassID) {
332 // 31 = 32 GPR - x0 (zero register)
333 // FIXME: Should we exclude fixed registers like SP, TP or GP?
334 return 31;
336 if (ST->hasStdExtF())
337 return 32;
338 return 0;
340 // Although there are 32 vector registers, v0 is special in that it is the
341 // only register that can be used to hold a mask.
342 // FIXME: Should we conservatively return 31 as the number of usable
343 // vector registers?
344 return ST->hasVInstructions() ? 32 : 0;
345 }
346 llvm_unreachable("unknown register class");
347 }
348
349 unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
350 if (Vector)
352 if (!Ty)
354
355 Type *ScalarTy = Ty->getScalarType();
356 if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhmin()) ||
357 (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
358 (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
360 }
361
363 }
364
365 const char *getRegisterClassName(unsigned ClassID) const {
366 switch (ClassID) {
368 return "RISCV::GPRRC";
370 return "RISCV::FPRRC";
372 return "RISCV::VRRC";
373 }
374 llvm_unreachable("unknown register class");
375 }
376
379
381 return true;
382 }
383};
384
385} // end namespace llvm
386
387#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
static const Function * getParent(const Value *V)
This file provides a helper that implements much of the TTI interface in terms of the target-independent code generator and TargetLowering interfaces.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
TargetTransformInfo::VPLegalization VPLegalization
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
const char LLVMTargetMachineRef TM
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition: APInt.h:76
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Base class which can be used to help build a TTI implementation.
Definition: BasicTTIImpl.h:80
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1654
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:960
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:307
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
Machine Value Type.
The optimization diagnostic interface.
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
bool shouldFoldTerminatingConditionAfterLSR() const
const char * getRegisterClassName(unsigned ClassID) const
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt)
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2)
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment)
InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const
TargetTransformInfo::VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
bool isLegalMaskedStore(Type *DataType, Align Alignment)
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getNumberOfRegisters(unsigned ClassID) const
bool isElementTypeLegalForScalableVector(Type *Ty) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
unsigned getMaxInterleaveFactor(ElementCount VF)
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
bool isLegalMaskedLoadStore(Type *DataType, Align Alignment)
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr)
bool enableScalableVectorization() const
bool preferEpilogueVectorization() const
bool areInlineCompatible(const Function *Caller, const Function *Callee) const
bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment)
bool isVScaleKnownToBeAPowerOfTwo() const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
bool isLegalMaskedLoad(Type *DataType, Align Alignment)
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
bool supportsScalableVectors() const
std::optional< unsigned > getVScaleForTuning() const
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, FastMathFlags FMF, TTI::TargetCostKind CostKind)
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
std::optional< unsigned > getMaxVScale() const
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind)
TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth)
bool shouldExpandReduction(const IntrinsicInst *II) const
InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo, TTI::TargetCostKind CostKind)
Return the cost of materializing an immediate for a value operand of a store instruction.
bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment)
bool isLegalStridedLoadStore(Type *DataType, Align Alignment)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getRegUsageForType(Type *Ty)
bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment)
bool isLegalMaskedGather(Type *DataType, Align Alignment)
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpdInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
bool enableOrderedReductions() const
bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
bool isLegalMaskedScatter(Type *DataType, Align Alignment)
unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
unsigned getMinVectorRegisterBitWidth() const
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
bool isLegalElementTypeForRVV(EVT ScalarTy) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const
Return true if a stride load store of the given result type and alignment is legal.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
Definition: IVDescriptors.h:71
Type * getRecurrenceType() const
Returns the type of the recurrence.
RecurKind getRecurrenceKind() const
The main scalar evolution driver.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
const DataLayout & getDataLayout() const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TargetCostKind
The kind of cost model.
PopcntSupportKind
Flags indicating the kind of support for population count.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
CastContextHint
Represents a hint about the context in which a cast is used.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:154
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition: Type.h:143
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
AddressSpace
Definition: NVPTXBaseInfo.h:21
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ FAnyOf
Any_of reduction with select(fcmp(),x,y) where one of (x,y) is loop invariant, and both x and y are integer type.
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMax
FP max implemented in terms of select(cmp()).
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ FMin
FP min implemented in terms of select(cmp()).
@ Add
Sum of integers.
@ FAdd
Sum of floats.
@ IAnyOf
Any_of reduction with select(icmp(),x,y) where one of (x,y) is loop invariant, and both x and y are integer type.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead implements it with an icmp.
@ Data
Use predicate only to mask operations on data in the loop.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:380
bool isFixedLengthVector() const
Definition: ValueTypes.h:177
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:313
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Describe known properties for a set of pointers.
Parameters that control the generic loop unrolling transformation.