LLVM 22.0.0git
ARMTargetTransformInfo.h
//===- ARMTargetTransformInfo.h - ARM specific TTI --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines a TargetTransformInfoImplBase conforming object specific
/// to the ARM target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <optional>

namespace llvm {

class APInt;
class ARMTargetLowering;
class Instruction;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;

// For controlling conversion of memcpy into Tail Predicated loop.
namespace TPLoop {
enum MemTransfer { ForceDisabled = 0, ForceEnabled, Allow };
}

class ARMTTIImpl final : public BasicTTIImplBase<ARMTTIImpl> {
  using BaseT = BasicTTIImplBase<ARMTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  // Currently the following features are excluded from InlineFeaturesAllowed.
  // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureFP64, FeatureD32
  // Depending on whether they are set or unset, different
  // instructions/registers are available. For example, inlining a callee with
  // -thumb-mode in a caller with +thumb-mode may cause the assembler to
  // fail if the callee uses ARM-only instructions, e.g. in inline asm.
  const FeatureBitset InlineFeaturesAllowed = {
      ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
      ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
      ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,
      ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
      ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
      ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
      ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
      ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
      ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
      ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
      ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
      ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
      ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
      ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
      ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
      ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,
      ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,
      ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,
      ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,
      ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,
      ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,
      ARM::FeatureAClass, ARM::FeatureStrictAlign, ARM::FeatureLongCalls,
      ARM::FeatureExecuteOnly, ARM::FeatureReserveR9, ARM::FeatureNoMovt,
      ARM::FeatureNoNegativeImmediates
  };

  const ARMSubtarget *getST() const { return ST; }
  const ARMTargetLowering *getTLI() const { return TLI; }

public:
  explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override;

  bool enableInterleavedAccessVectorization() const override { return true; }

  TTI::AddressingModeKind
  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const override;

  /// Floating-point computation using ARMv8 AArch32 Advanced
  /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
  /// and Arm MVE are IEEE-754 compliant.
  bool isFPVectorizationPotentiallyUnsafe() const override {
    return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
  }

  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const override;

  /// \name Scalar TTI Implementations
  /// @{

  InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                        const APInt &Imm,
                                        Type *Ty) const override;

  using BaseT::getIntImmCost;
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind) const override;

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const override {
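    // ClassID 1 selects the vector register file: 16 128-bit Q registers with
    // NEON, 8 with MVE, and none otherwise. The scalar class reports the
    // general-purpose registers: r0-r7 on Thumb1-only cores, r0-r12 elsewhere.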
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      if (ST->hasMVEIntegerOps())
        return 8;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 13;
  }

  TypeSize
  getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override {
    switch (K) {
    case TargetTransformInfo::RGK_Scalar:
      return TypeSize::getFixed(32);
    case TargetTransformInfo::RGK_FixedWidthVector:
      if (ST->hasNEON())
        return TypeSize::getFixed(128);
      if (ST->hasMVEIntegerOps())
        return TypeSize::getFixed(128);
      return TypeSize::getFixed(0);
    case TargetTransformInfo::RGK_ScalableVector:
      return TypeSize::getScalable(0);
    }
    llvm_unreachable("Unsupported register kind");
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) const override {
    return ST->getMaxInterleaveFactor();
  }

  bool isProfitableLSRChainElement(Instruction *I) const override;

  bool
  isLegalMaskedLoad(Type *DataTy, Align Alignment, unsigned AddressSpace,
                    TTI::MaskKind MaskKind =
                        TTI::MaskKind::VariableOrConstantMask) const override;

  bool
  isLegalMaskedStore(Type *DataTy, Align Alignment, unsigned AddressSpace,
                     TTI::MaskKind MaskKind =
                         TTI::MaskKind::VariableOrConstantMask) const override {
    return isLegalMaskedLoad(DataTy, Alignment, AddressSpace, MaskKind);
  }

  bool forceScalarizeMaskedGather(VectorType *VTy,
                                  Align Alignment) const override {
    // For MVE, we have a custom lowering pass that will already have custom
    // legalised any gathers that we can lower to MVE intrinsics, and want to
    // expand all the rest. The pass runs before the masked intrinsic lowering
    // pass.
    return true;
  }

  bool forceScalarizeMaskedScatter(VectorType *VTy,
                                   Align Alignment) const override {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }

  bool isLegalMaskedGather(Type *Ty, Align Alignment) const override;

  bool isLegalMaskedScatter(Type *Ty, Align Alignment) const override {
    return isLegalMaskedGather(Ty, Alignment);
  }

  InstructionCost getMemcpyCost(const Instruction *I) const override;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return ST->getMaxInlineSizeThreshold();
  }

  int getNumMemOps(const IntrinsicInst *I) const;

  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
                 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
                 VectorType *SubTp, ArrayRef<const Value *> Args = {},
                 const Instruction *CxtI = nullptr) const override;

  bool preferInLoopReduction(RecurKind Kind, Type *Ty) const override;

  bool preferPredicatedReductionSelect() const override;

  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return false;
  }

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const override;

  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
                   const Instruction *I = nullptr) const override;

  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, const Value *Op0,
                                     const Value *Op1) const override;

  InstructionCost
  getAddressComputationCost(Type *Val, ScalarEvolution *SE, const SCEV *Ptr,
                            TTI::TargetCostKind CostKind) const override;

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {},
      const Instruction *CxtI = nullptr) const override;

  InstructionCost getMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr) const override;

  InstructionCost
  getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
                        TTI::TargetCostKind CostKind) const override;

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;

  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) const override;

  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *ValTy, std::optional<FastMathFlags> FMF,
                           TTI::TargetCostKind CostKind) const override;
  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
                         VectorType *ValTy,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) const override;

  InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) const override;

  /// getScalingFactorCost - Return the cost of the scaling used in the
  /// addressing mode represented by AM.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, the return value is an invalid cost.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const override;

  bool maybeLoweredToCall(Instruction &I) const;
  bool isLoweredToCall(const Function *F) const override;
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const override;
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override;
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const override;

  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override;

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) const override;
  bool shouldBuildLookupTablesForConstant(Constant *C) const override {
    // In the ROPI and RWPI relocation models we can't have pointers to global
    // variables or functions in constant data, so don't convert switches to
    // lookup tables if any of the values would need relocation.
    if (ST->isROPI() || ST->isRWPI())
      return !C->needsDynamicRelocation();

    return true;
  }

  bool hasArmWideBranch(bool Thumb) const override;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override;

  unsigned getNumBytesToPadGlobalArray(unsigned Size,
                                       Type *ArrayType) const override;

  /// @}
};

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
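/// For example, a VREV64 of <8 x i8> (BlockSize = 64, EltSz = 8,
/// BlockElts = 8) corresponds to the mask <7, 6, 5, 4, 3, 2, 1, 0>, and a
/// VREV32 of <8 x i16> (BlockSize = 32, EltSz = 16, BlockElts = 2) to
/// <1, 0, 3, 2, 5, 4, 7, 6>.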
inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz != 8 && EltSz != 16 && EltSz != 32)
    return false;

  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0, e = M.size(); i < e; ++i) {
    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
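
The hooks declared above are not normally called directly. IR-level passes query the target-independent TargetTransformInfo interface, and when compiling for ARM those queries are answered by ARMTTIImpl. A minimal sketch of such a query, assuming nothing beyond the public TTI API (hasWideFixedVectors is a hypothetical helper, not part of this header):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/TypeSize.h"

using namespace llvm;

// Hypothetical helper (not an LLVM API): reports whether the target exposes
// fixed-width vector registers of at least 128 bits. On ARM this query is
// answered by ARMTTIImpl::getRegisterBitWidth above, which returns 128 when
// NEON or MVE integer operations are available and 0 otherwise.
static bool hasWideFixedVectors(const TargetTransformInfo &TTI) {
  TypeSize Width =
      TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
  return Width.getFixedValue() >= 128;
}

In a pass, the TargetTransformInfo for a function F is typically obtained with FAM.getResult<TargetIRAnalysis>(F) under the new pass manager, or getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F) under the legacy one.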