#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
class ARMTargetLowering;
namespace TailPredication {
  enum Mode {
    Disabled = 0,
    EnabledNoReductions,
    Enabled,
    ForceEnabledNoReductions,
    ForceEnabled
  };
} // namespace TailPredication
class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
  using BaseT = BasicTTIImplBase<ARMTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  // Subtarget features that are allowed to differ between caller and callee
  // when deciding whether inlining is compatible (see the illustrative sketch
  // below); features outside this set must match exactly.
  const FeatureBitset InlineFeaturesAllowed = {
      ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
      ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
      ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,
      ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
      ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
      ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
      ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
      ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
      ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
      ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
      ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
      ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
      ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
      ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
      ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
      ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,
      ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,
      ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,
      ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,
      ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,
      ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,
      ARM::FeatureAClass, ARM::FeatureNaClTrap, ARM::FeatureStrictAlign,
      ARM::FeatureLongCalls, ARM::FeatureExecuteOnly, ARM::FeatureReserveR9,
      ARM::FeatureNoMovt, ARM::FeatureNoNegativeImmediates};
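
  // Illustration only: a sketch of how such an allowlist is typically
  // consumed when checking inlining compatibility. The real implementation
  // lives in ARMTargetTransformInfo.cpp and may differ in detail.
  //
  //   bool areInlineCompatible(const Function *Caller,
  //                            const Function *Callee) const {
  //     const TargetMachine &TM = getTLI()->getTargetMachine();
  //     const FeatureBitset &CallerBits =
  //         TM.getSubtargetImpl(*Caller)->getFeatureBits();
  //     const FeatureBitset &CalleeBits =
  //         TM.getSubtargetImpl(*Callee)->getFeatureBits();
  //     // Features outside the allowlist must match exactly; for allowlisted
  //     // features the callee's set must be a subset of the caller's.
  //     bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
  //                       (CalleeBits & ~InlineFeaturesAllowed);
  //     bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
  //                        (CalleeBits & InlineFeaturesAllowed);
  //     return MatchExact && MatchSubset;
  //   }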
public:
  explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}
  /// Floating-point computation using ARMv8 AArch32 Advanced
  /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
  /// is IEEE-754 compliant, but it's not covered in this target.
  bool isFPVectorizationPotentiallyUnsafe() {
    return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
  }
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  unsigned getNumberOfRegisters(unsigned ClassID) const {
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      if (ST->hasMVEIntegerOps())
        return 8;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 13;
  }
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    switch (K) {
    case TargetTransformInfo::RGK_Scalar:
      return TypeSize::getFixed(32);
    case TargetTransformInfo::RGK_FixedWidthVector:
      if (ST->hasNEON())
        return TypeSize::getFixed(128);
      if (ST->hasMVEIntegerOps())
        return TypeSize::getFixed(128);
      return TypeSize::getFixed(0);
    case TargetTransformInfo::RGK_ScalableVector:
      return TypeSize::getScalable(0);
    }
    llvm_unreachable("Unsupported register kind");
  }
  unsigned getMaxInterleaveFactor(ElementCount VF) {
    return ST->getMaxInterleaveFactor();
  }
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
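
  // Usage sketch (hypothetical caller code, not part of this header): a pass
  // holding a TargetTransformInfo &TTI for an ARM function could query the
  // reciprocal-throughput cost of a 128-bit vector add like so:
  //
  //   auto *VTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  //   InstructionCost Cost = TTI.getArithmeticInstrCost(
  //       Instruction::Add, VTy, TargetTransformInfo::TCK_RecipThroughput);
  //
  // On a NEON or MVE subtarget this should be cheap; without vector support
  // the returned cost reflects scalarization.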
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr);
  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);
  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *ValTy,
                                           std::optional<FastMathFlags> FMF,
                                           TTI::TargetCostKind CostKind);
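
  // Intent, illustrated (an assumption based on the hook names, not a
  // statement from this header): an "extended" reduction accumulates narrow
  // lanes into a wider result, e.g.
  //   int32_t Acc = 0;
  //   for (int i = 0; i < N; ++i)
  //     Acc += (int32_t)A[i];          // A holds int16_t elements
  // MVE can perform this kind of widening accumulate across a vector in a
  // single VADDV/VMLADAV-family instruction, so these hooks can report a much
  // lower cost than fully scalarized code.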
  /// getScalingFactorCost - Return the cost of the scaling used in the
  /// addressing mode represented by AM.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) const;
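
  // For intuition (hypothetical query; variable names and values are
  // assumptions, not part of this header): a Scale of 4 with a base register
  // corresponds to an addressing mode such as "ldr r0, [r1, r2, lsl #2]",
  // i.e. Base + 4 * Index. A caller with a TargetTransformInfo &TTI could ask:
  //
  //   InstructionCost ScaleCost = TTI.getScalingFactorCost(
  //       Type::getInt32Ty(Ctx), /*BaseGV=*/nullptr, /*BaseOffset=*/0,
  //       /*HasBaseReg=*/true, /*Scale=*/4, /*AddrSpace=*/0);
  //
  // A cost of zero means the scaled form is considered free on this subtarget.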
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo);
  bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                   AssumptionCache &AC, TargetLibraryInfo *TLI,
                                   DominatorTree *DT,
                                   LoopVectorizationLegality *LVL,
                                   InterleavedAccessInfo *IAI);
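
  // Context (illustrative, not normative): with MVE tail-predication, a
  // vectorized loop handles the remainder with masked vector iterations
  // instead of a scalar epilogue; conceptually each iteration runs under a
  // lane mask such as the one produced by llvm.get.active.lane.mask.
  // preferPredicateOverEpilogue returns true when that tail-folded form is
  // expected to be profitable for the given loop.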
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);
  bool shouldBuildLookupTablesForConstant(Constant *C) const {
    // In the ROPI and RWPI relocation models we can't have pointers to global
    // variables or functions in constant data, so don't convert switches to
    // lookup tables if any of the values would need relocation.
    if (ST->isROPI() || ST->isRWPI())
      return !C->needsDynamicRelocation();

    return true;
  }
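
  // Example of what this guards against (illustrative): SimplifyCFG may turn
  //   switch (i) { case 0: return f(); case 1: return g(); ... }
  // into a load from a constant table of function pointers. Under ROPI/RWPI a
  // table entry like &f needs a dynamic relocation, so
  // needsDynamicRelocation() is true for that constant and the transform is
  // rejected here.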
340 "Only possible block sizes for VREV are: 16, 32, 64");
343 if (EltSz != 8 && EltSz != 16 && EltSz != 32)
346 unsigned BlockElts = M[0] + 1;
354 for (
unsigned i = 0, e = M.size(); i < e; ++i) {
357 if ((
unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
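
// Worked example (illustrative, not part of the original header): VREV64
// reverses the element order within each 64-bit block of a vector. For a
// v8i16 shuffle that means reversing each group of four 16-bit lanes:
//
//   RevInBlock  = {3, 2, 1, 0, 7, 6, 5, 4}   // a VREV64.16 mask
//   FullReverse = {7, 6, 5, 4, 3, 2, 1, 0}   // not a VREV64 mask
//
// isVREVMask(RevInBlock, MVT::v8i16, 64) holds: BlockElts = M[0] + 1 = 4 and
// every index equals (i - i % 4) + (3 - i % 4). FullReverse fails the
// BlockSize == BlockElts * EltSz check (8 * 16 == 128 != 64) because it swaps
// lanes across the 64-bit block boundary.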