RISCVTargetTransformInfo.h
//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfoImplBase conforming object specific
/// to the RISC-V target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H

#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>

namespace llvm {

class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
  using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const RISCVSubtarget *ST;
  const RISCVTargetLowering *TLI;

  const RISCVSubtarget *getST() const { return ST; }
  const RISCVTargetLowering *getTLI() const { return TLI; }
  /// This function returns an estimate for VL to be used in VL-based terms
  /// of the cost model. For fixed-length vectors, this is simply the
  /// vector length. For scalable vectors, we return results consistent
  /// with getVScaleForTuning, under the assumption that clients are also
  /// using it when comparing costs between the scalar and vector
  /// representations. This does unfortunately mean that we can both
  /// undershoot and overshoot the true cost significantly if
  /// getVScaleForTuning is wildly off for the actual target hardware.
  unsigned getEstimatedVLFor(VectorType *Ty) const;
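  // Illustrative example (not part of the original header): if
  // getVScaleForTuning() returns 2, a scalable <vscale x 4 x i32> is costed
  // as if VL were 2 * 4 = 8 lanes, matching the cost a fixed <8 x i32>
  // would get when vscale really is 2 on the target hardware.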

  /// This function calculates the costs for one or more RVV opcodes based
  /// on the vtype and the cost kind.
  /// \param Opcodes A list of opcodes of the RVV instruction to evaluate.
  /// \param VT The MVT of the vtype associated with the RVV instructions.
  /// For widening/narrowing instructions where the result and source types
  /// differ, it is important to check the spec to determine whether the vtype
  /// refers to the result or the source type.
  /// \param CostKind The type of cost to compute.
  InstructionCost getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
                                          TTI::TargetCostKind CostKind) const;

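  // Hypothetical usage sketch (RISCV::VWADD_VV is a real opcode enum, but the
  // call itself is illustrative, not original code):
  //   getRISCVInstructionCost({RISCV::VWADD_VV}, MVT::nxv4i16, CostKind);
  // Per the RVV spec, vtype for a widening add describes the narrower
  // *source* elements, not the doubled-width result, hence the caveat above.
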
  // Return the cost of generating a PC-relative address.
  InstructionCost
  getStaticDataAddrGenerationCost(const TTI::TargetCostKind CostKind) const;

  /// Return the cost of accessing a constant pool entry of the specified
  /// type.
  InstructionCost getConstantPoolLoadCost(Type *Ty,
                                          TTI::TargetCostKind CostKind) const;

  /// If this shuffle can be lowered as a masked slide pair (at worst),
  /// return a cost for it.
  InstructionCost getSlideCost(FixedVectorType *Tp, ArrayRef<int> Mask,
                               TTI::TargetCostKind CostKind) const;

public:
  explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// Return the cost of materializing an immediate for a value operand of
  /// a store instruction.
  InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
                                  TTI::TargetCostKind CostKind) const;

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind) const override;
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const override;
  InstructionCost
  getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                      Type *Ty, TTI::TargetCostKind CostKind) const override;

  /// \name EVL Support for predicated vectorization.
  /// Whether the target supports the %evl parameter of VP intrinsics
  /// efficiently in hardware. (see LLVM Language Reference - "Vector
  /// Predication Intrinsics",
  /// https://llvm.org/docs/LangRef.html#vector-predication-intrinsics and
  /// "IR-level VP intrinsics",
  /// https://llvm.org/docs/Proposals/VectorPredication.html#ir-level-vp-intrinsics).
  bool hasActiveVectorLength() const override;
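  // Illustrative IR sketch (not from the original header): with EVL support,
  // a predicated add such as
  //   %r = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(
  //            <vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
  //            <vscale x 4 x i1> %m, i32 %evl)
  // can map directly to a vsetvli on %evl plus a masked vadd.vv rather than
  // being expanded into unpredicated operations.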

  TargetTransformInfo::PopcntSupportKind
  getPopcntSupport(unsigned TyWidth) const override;

  InstructionCost getPartialReductionCost(
      unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
      ElementCount VF, TTI::PartialReductionExtendKind OpAExtend,
      TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
      TTI::TargetCostKind CostKind,
      std::optional<FastMathFlags> FMF) const override;

  bool shouldExpandReduction(const IntrinsicInst *II) const override;
  bool supportsScalableVectors() const override {
    return ST->hasVInstructions();
  }
  bool enableOrderedReductions() const override { return true; }
  bool enableScalableVectorization() const override {
    return ST->hasVInstructions();
  }
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
    return ST->hasVInstructions();
  }
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const override {
    return ST->hasVInstructions() ? TailFoldingStyle::DataWithEVL
                                  : TailFoldingStyle::None;
  }
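  // Illustrative sketch (not original code): with DataWithEVL tail folding,
  // a loop over N elements needs no scalar epilogue. Each iteration requests
  // an element count from the hardware (vsetvli with AVL = remaining trip
  // count), and the final partial iteration simply runs with a smaller EVL.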
  std::optional<unsigned> getMaxVScale() const override;
  std::optional<unsigned> getVScaleForTuning() const override;

  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override;

  unsigned getRegUsageForType(Type *Ty) const override;

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override;

  bool preferAlternateOpcodeVectorization() const override;

  bool preferEpilogueVectorization() const override {
    // Epilogue vectorization is usually unprofitable - tail folding or
    // a smaller VF would have been better. This is a blunt hammer - we
    // should re-examine this once vectorization is better tuned.
    return false;
  }

  bool shouldConsiderVectorizationRegPressure() const override { return true; }

  InstructionCost
  getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
                           TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const override;

  InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const TTI::PointersChainInfo &Info, Type *AccessTy,
                       TTI::TargetCostKind CostKind) const override;

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const override;

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) const override;

  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) const override;

  unsigned getMinVectorRegisterBitWidth() const override {
    return ST->useRVVForFixedLengthVectors() ? 16 : 0;
  }

  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
                 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                 const Instruction *CxtI = nullptr) const override;

  InstructionCost
  getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
                           bool Insert, bool Extract,
                           TTI::TargetCostKind CostKind,
                           bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
                           TTI::VectorInstrContext VIC =
                               TTI::VectorInstrContext::None) const override;

  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getAddressComputationCost(Type *PTy, ScalarEvolution *SE, const SCEV *Ptr,
                            TTI::TargetCostKind CostKind) const override;

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;

  InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA,
                                         TTI::TargetCostKind CostKind) const;

  InstructionCost
  getExpandCompressMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                                TTI::TargetCostKind CostKind) const;

  InstructionCost getStridedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                                         TTI::TargetCostKind CostKind) const;

  InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                                        TTI::TargetCostKind CostKind) const;

  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                   const Instruction *I = nullptr) const override;

  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *ValTy, std::optional<FastMathFlags> FMF,
                           TTI::TargetCostKind CostKind) const override;

  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  InstructionCost
  getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
                     unsigned Index, const Value *Op0, const Value *Op1,
                     TTI::VectorInstrContext VIC =
                         TTI::VectorInstrContext::None) const override;

  InstructionCost
  getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val,
                                   TTI::TargetCostKind CostKind,
                                   unsigned Index) const override;

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;

  bool isElementTypeLegalForScalableVector(Type *Ty) const override {
    return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty));
  }

  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const {
    if (!ST->hasVInstructions())
      return false;

    EVT DataTypeVT = TLI->getValueType(DL, DataType);

    // Only support fixed vectors if we know the minimum vector size.
    if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
      return false;

    EVT ElemType = DataTypeVT.getScalarType();
    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
      return false;

    return TLI->isLegalElementTypeForRVV(ElemType);
  }
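  // Worked example (illustrative, not original code): a masked load of
  // <4 x i32> with align 2 fails the check above (Alignment 2 < store size
  // 4 of the i32 element), so it is only legal on subtargets that enable
  // unaligned vector memory accesses.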

  bool isLegalMaskedLoad(Type *DataType, Align Alignment,
                         unsigned /*AddressSpace*/,
                         TTI::MaskKind /*MaskKind*/) const override {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment,
                          unsigned /*AddressSpace*/,
                          TTI::MaskKind /*MaskKind*/) const override {
    return isLegalMaskedLoadStore(DataType, Alignment);
  }

  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const {
    if (!ST->hasVInstructions())
      return false;

    EVT DataTypeVT = TLI->getValueType(DL, DataType);

    // Only support fixed vectors if we know the minimum vector size.
    if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
      return false;

    // We also need to check whether the vector of addresses is valid.
    EVT PointerTypeVT = EVT(TLI->getPointerTy(DL));
    if (DataTypeVT.isScalableVector() &&
        !TLI->isLegalElementTypeForRVV(PointerTypeVT))
      return false;

    EVT ElemType = DataTypeVT.getScalarType();
    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
      return false;

    return TLI->isLegalElementTypeForRVV(ElemType);
  }

  bool isLegalMaskedGather(Type *DataType, Align Alignment) const override {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override {
    return isLegalMaskedGatherScatter(DataType, Alignment);
  }

  bool forceScalarizeMaskedGather(VectorType *VTy,
                                  Align Alignment) const override {
    // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
    return ST->is64Bit() && !ST->hasVInstructionsI64();
  }

  bool forceScalarizeMaskedScatter(VectorType *VTy,
                                   Align Alignment) const override {
    // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
    return ST->is64Bit() && !ST->hasVInstructionsI64();
  }

  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const override {
    EVT DataTypeVT = TLI->getValueType(DL, DataType);
    return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
  }

  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                    Align Alignment,
                                    unsigned AddrSpace) const override {
    return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
                                             DL);
  }

  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const override;

  bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment) const override;

  bool isVScaleKnownToBeAPowerOfTwo() const override {
    return TLI->isVScaleKnownToBeAPowerOfTwo();
  }

  /// \returns How the target needs this vector-predicated operation to be
  /// transformed.
  TargetTransformInfo::VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
    using VPLegalization = TargetTransformInfo::VPLegalization;
    if (!ST->hasVInstructions() ||
        (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
         cast<VectorType>(PI.getArgOperand(1)->getType())
                 ->getElementType()
                 ->getIntegerBitWidth() != 1))
      return VPLegalization(VPLegalization::Discard, VPLegalization::Convert);
    return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
  }
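  // Illustrative example (not original code): @llvm.vp.reduce.mul.nxv4i32 is
  // reported as Discard/Convert, i.e. folded away from VP form, while i1
  // multiply reductions and other VP intrinsics remain Legal whenever vector
  // instructions are available.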

  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const override {
    if (!VF.isScalable())
      return true;

    Type *Ty = RdxDesc.getRecurrenceType();
    if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)))
      return false;

    switch (RdxDesc.getRecurrenceKind()) {
    case RecurKind::Add:
    case RecurKind::Sub:
    case RecurKind::AddChainWithSubs:
    case RecurKind::And:
    case RecurKind::Or:
    case RecurKind::Xor:
    case RecurKind::SMin:
    case RecurKind::SMax:
    case RecurKind::UMin:
    case RecurKind::UMax:
    case RecurKind::FMin:
    case RecurKind::FMax:
      return true;
    case RecurKind::AnyOf:
    case RecurKind::FAdd:
    case RecurKind::FMulAdd:
      // We can't promote f16/bf16 fadd reductions and scalable vectors can't
      // be expanded.
      if (Ty->isBFloatTy() || (Ty->isHalfTy() && !ST->hasVInstructionsF16()))
        return false;
      return true;
    default:
      return false;
    }
  }
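  // Illustrative example (not original code): a scalable fadd reduction over
  // bf16 is rejected here, so the vectorizer must use fixed-width vectors or
  // scalar code for that loop, while the same reduction over f32 is allowed.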

  unsigned getMaxInterleaveFactor(ElementCount VF) const override {
    // Don't interleave if the loop has been vectorized with scalable vectors.
    if (VF.isScalable())
      return 1;
    // If the loop will not be vectorized, don't interleave it either;
    // let the regular unroller handle it.
    return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
  }

  bool enableInterleavedAccessVectorization() const override { return true; }

  bool enableMaskedInterleavedAccessVectorization() const override {
    return ST->hasVInstructions();
  }

  unsigned getMinTripCountTailFoldingThreshold() const override;

  enum RISCVRegisterClass { GPRRC, FPRRC, VRRC };
  unsigned getNumberOfRegisters(unsigned ClassID) const override {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      // 31 = 32 GPR - x0 (zero register)
      // FIXME: Should we exclude fixed registers like SP, TP or GP?
      return 31;
    case RISCVRegisterClass::FPRRC:
      if (ST->hasStdExtF())
        return 32;
      return 0;
    case RISCVRegisterClass::VRRC:
      // Although there are 32 vector registers, v0 is special in that it is
      // the only register that can be used to hold a mask.
      // FIXME: Should we conservatively return 31 as the number of usable
      // vector registers?
      return ST->hasVInstructions() ? 32 : 0;
    }
    llvm_unreachable("unknown register class");
  }
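  // Illustrative note (not original code): these counts feed e.g. the loop
  // vectorizer's register pressure heuristics, so a query with
  // ClassID == VRRC reports 32 registers on a subtarget with vector support,
  // even though v0 may be occupied as the mask register.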

  TTI::AddressingModeKind
  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const override;

  unsigned getRegisterClassForType(bool Vector,
                                   Type *Ty = nullptr) const override {
    if (Vector)
      return RISCVRegisterClass::VRRC;
    if (!Ty)
      return RISCVRegisterClass::GPRRC;

    Type *ScalarTy = Ty->getScalarType();
    if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhmin()) ||
        (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
        (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
      return RISCVRegisterClass::FPRRC;
    }

    return RISCVRegisterClass::GPRRC;
  }

  const char *getRegisterClassName(unsigned ClassID) const override {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      return "RISCV::GPRRC";
    case RISCVRegisterClass::FPRRC:
      return "RISCV::FPRRC";
    case RISCVRegisterClass::VRRC:
      return "RISCV::VRRC";
    }
    llvm_unreachable("unknown register class");
  }

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const override;

  bool shouldConsiderAddressTypePromotion(
      const Instruction &I,
      bool &AllowPromotionWithoutCommonHeader) const override;
  std::optional<unsigned> getMinPageSize() const override { return 4096; }
  /// Return true if the (vector) instruction I will be lowered to an
  /// instruction with a scalar splat operand for the given Operand number.
  bool canSplatOperand(Instruction *I, int Operand) const;
  /// Return true if a vector instruction will lower to a target instruction
  /// able to splat the given operand.
  bool canSplatOperand(unsigned Opcode, int Operand) const;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;

  TTI::MemCmpExpansionOptions
  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override;

  bool enableSelectOptimize() const override {
    return ST->enableSelectOptimize();
  }

  bool shouldTreatInstructionLikeSelect(const Instruction *I) const override;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H