//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {

class Function;
class GlobalValue;
class LLVMContext;
class ScalarEvolution;
class SCEV;
class TargetMachine;

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
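//
// A minimal sketch of that contract (hypothetical target "Foo"; FooSubtarget
// and FooTargetLowering stand in for a real target's subtarget and lowering
// classes and are not defined anywhere in LLVM):
//
//   class FooTTIImpl : public BasicTTIImplBase<FooTTIImpl> {
//     const FooSubtarget *ST;        // the derived class holds the pointers,
//     const FooTargetLowering *TLI;  // so this base needs no storage for them
//
//   public:
//     const FooSubtarget *getST() const { return ST; }
//     const FooTargetLowering *getTLI() const { return TLI; }
//   };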
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  using BaseT = TargetTransformInfoImplCRTPBase<T>;
  using TTI = TargetTransformInfo;

  /// Helper function to access this as a T.
  T *thisT() { return static_cast<T *>(this); }

  /// Estimate a cost of Broadcast as an extract and sequence of insert
  /// operations.
  InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy) {
    InstructionCost Cost = 0;
    // Broadcast cost is equal to the cost of extracting the zero'th element
    // plus the cost of inserting it into every element of the result vector.
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);

    for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
    }
    return Cost;
  }
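  // Illustrative tally (assuming a unit cost per insert/extract, which
  // targets typically override): broadcasting a <4 x float> is costed as
  // 1 extract + 4 inserts = 5.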

  /// Estimate a cost of shuffle as a sequence of extract and insert
  /// operations.
  InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy) {
    InstructionCost Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // source vector plus the cost of inserting it into the result vector.

    // E.g. a <4 x float> shuffle with a mask of <0,5,2,7> needs to extract
    // from index 0 of the first vector, index 1 of the second vector, index 2
    // of the first vector, and finally index 3 of the second vector, and
    // insert them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
    }
    return Cost;
  }

  /// Estimate a cost of subvector extraction as a sequence of extract and
  /// insert operations.
  InstructionCost getExtractSubvectorOverhead(VectorType *VTy, int Index,
                                              FixedVectorType *SubVTy) {
    assert(VTy && SubVTy &&
           "Can only extract subvectors from vectors");
    int NumSubElts = SubVTy->getNumElements();
    assert((!isa<FixedVectorType>(VTy) ||
            (Index + NumSubElts) <=
                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
           "SK_ExtractSubvector index out of range");

    InstructionCost Cost = 0;
    // Subvector extraction cost is equal to the cost of extracting the
    // elements from the source type plus the cost of inserting them into the
    // result vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                          i + Index);
      Cost +=
          thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
    }
    return Cost;
  }

  /// Estimate a cost of subvector insertion as a sequence of extract and
  /// insert operations.
  InstructionCost getInsertSubvectorOverhead(VectorType *VTy, int Index,
                                             FixedVectorType *SubVTy) {
    assert(VTy && SubVTy &&
           "Can only insert subvectors into vectors");
    int NumSubElts = SubVTy->getNumElements();
    assert((!isa<FixedVectorType>(VTy) ||
            (Index + NumSubElts) <=
                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
           "SK_InsertSubvector index out of range");

    InstructionCost Cost = 0;
    // Subvector insertion cost is equal to the cost of extracting the
    // elements from the source type plus the cost of inserting them into the
    // result vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost +=
          thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                          i + Index);
    }
    return Cost;
  }
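  // Illustrative tally (again assuming unit insert/extract costs): moving a
  // <2 x i32> subvector out of, or into, an <8 x i32> vector at Index 4 is
  // costed as 2 extracts + 2 inserts = 4 in either direction.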

  /// Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

  static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
    switch (M) {
    case TTI::MIM_Unindexed:
      return ISD::UNINDEXED;
    case TTI::MIM_PreInc:
      return ISD::PRE_INC;
    case TTI::MIM_PreDec:
      return ISD::PRE_DEC;
    case TTI::MIM_PostInc:
      return ISD::POST_INC;
    case TTI::MIM_PostDec:
      return ISD::POST_DEC;
    }
    llvm_unreachable("Unexpected MemIndexedMode");
  }

  InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                              Align Alignment,
                                              bool VariableMask,
                                              bool IsGatherScatter,
                                              TTI::TargetCostKind CostKind) {
    auto *VT = cast<FixedVectorType>(DataTy);
    // Assume the target does not have support for gather/scatter operations
    // and provide a rough estimate.
    //
    // First, compute the cost of the individual memory operations.
    InstructionCost AddrExtractCost =
        IsGatherScatter
            ? getVectorInstrCost(Instruction::ExtractElement,
                                 FixedVectorType::get(
                                     PointerType::get(VT->getElementType(), 0),
                                     VT->getNumElements()),
                                 -1)
            : 0;
    InstructionCost LoadCost =
        VT->getNumElements() *
        (AddrExtractCost +
         getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));

    // Next, compute the cost of packing the result in a vector.
    InstructionCost PackingCost = getScalarizationOverhead(
        VT, Opcode != Instruction::Store, Opcode == Instruction::Store);

    InstructionCost ConditionalCost = 0;
    if (VariableMask) {
      // Compute the cost of conditionally executing the memory operations with
      // variable masks. This includes extracting the individual conditions,
      // plus the branches and PHIs needed to combine the results.
      // NOTE: Estimating the cost of conditionally executing the memory
      // operations accurately is quite difficult and the current solution
      // provides a very rough estimate only.
      ConditionalCost =
          VT->getNumElements() *
          (getVectorInstrCost(
               Instruction::ExtractElement,
               FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
                                    VT->getNumElements()),
               -1) +
           getCFInstrCost(Instruction::Br, CostKind) +
           getCFInstrCost(Instruction::PHI, CostKind));
    }

    return LoadCost + PackingCost + ConditionalCost;
  }
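  // Illustrative decomposition (hypothetical unit costs, not any target's
  // real numbers): a masked gather of <4 x i32> with a variable mask is
  // estimated as 4 * (address extract + scalar load) for the memory side,
  // plus the inserts that repack the four results, plus
  // 4 * (condition extract + br + phi) for the per-lane control flow.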

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}
  virtual ~BasicTTIImplBase() = default;

  using TargetTransformInfoImplBase::DL;

public:
  /// \name Scalar TTI Implementations
  /// @{
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      bool *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(
        E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
  }

  bool hasBranchDivergence() { return false; }

  bool useGPUDivergenceAnalysis() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    // Return an invalid address space.
    return -1;
  }

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const {
    return false;
  }

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
    return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
  }

  unsigned getAssumedAddrSpace(const Value *V) const {
    return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const {
    return nullptr;
  }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
                          const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
                           const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
  }

  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
  }

  bool isNumRegsMajorCostOfLSR() {
    return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
  }

  bool isProfitableLSRChainElement(Instruction *I) {
    return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
  }

  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool useAA() const { return getST()->useAA(); }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }

  InstructionCost getRegUsageForType(Type *Ty) {
    InstructionCost Val = getTLI()->getTypeLegalizationCost(DL, Ty).first;
    assert(Val >= 0 && "Negative cost!");
    return Val;
  }

  InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                             ArrayRef<const Value *> Operands) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
  }

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) {
    /// Try to find the estimated number of clusters. Note that the number of
    /// clusters identified in this function could be different from the actual
    /// numbers found in lowering. This function ignores switches that are
    /// lowered with a mix of jump table / bit test / BTree. This function was
    /// initially intended to be used when estimating the cost of switch in
    /// inline cost heuristic, but it's a generic cost model to be used in other
    /// places (e.g., in loop unrolling).
    unsigned N = SI.getNumCases();
    const TargetLoweringBase *TLI = getTLI();
    const DataLayout &DL = this->getDataLayout();

    JumpTableSize = 0;
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

    // Early exit if both a jump table and bit test are not allowed.
    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test
    if (N <= DL.getIndexSizeInBits(0u)) {
      SmallPtrSet<const BasicBlock *, 4> Dests;
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());

      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
                                     DL))
        return 1;
    }

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal)
              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
      // Check whether a range of clusters is dense enough for a jump table
      if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }
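  // Illustrative behaviour: a switch over i64 with the dense cases
  // {0, 1, 2, 3} has Range == 4 and will typically collapse to a single
  // cluster (bit test or jump table), returning 1; cases spread out as
  // {0, 1 << 20, 1 << 40} are unlikely to qualify for either and are counted
  // individually, returning N. The exact outcome depends on the target's
  // TargetLoweringBase hooks.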

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool shouldBuildRelLookupTables() const {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    // If non-PIC mode, do not generate a relative lookup table.
    if (!TM.isPositionIndependent())
      return false;

    /// Relative lookup table entries consist of 32-bit offsets.
    /// Do not generate relative lookup tables for large code models
    /// in 64-bit architectures where 32-bit offsets might not be enough.
    if (TM.getCodeModel() == CodeModel::Medium ||
        TM.getCodeModel() == CodeModel::Large)
      return false;

    Triple TargetTriple = TM.getTargetTriple();
    if (!TargetTriple.isArch64Bit())
      return false;

    // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
    // there.
    if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
      return false;

    return true;
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
    return true;
  }

  InstructionCost getFPOpCost(Type *Ty) {
    // Check whether FADD is available, as a proxy for floating-point in
    // general.
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
      return TargetTransformInfo::TCC_Basic;
    return TargetTransformInfo::TCC_Expensive;
  }

  unsigned getInliningThresholdMultiplier() { return 1; }
  unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }

  int getInlinerVectorBonusPercent() { return 150; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have less than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (BasicBlock *BB : L->blocks()) {
      for (Instruction &I : *BB) {
        if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
          if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
            if (!thisT()->isLoweredToCall(F))
              continue;
          }

          if (ORE) {
            ORE->emit([&]() {
              return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
                                        L->getHeader())
                     << "advising against unrolling the loop because it "
                        "contains a "
                     << ore::NV("Call", &I);
            });
          }
          return;
        }
      }
    }

    // Enable runtime and partial unrolling up to the specified size.
    // Enable using trip count upper bound to unroll loops.
    UP.Partial = UP.Runtime = UP.UpperBound = true;
    UP.PartialThreshold = MaxOps;

    // Avoid unrolling when optimizing for size.
    UP.OptSizeThreshold = 0;
    UP.PartialOptSizeThreshold = 0;

    // Set number of instructions optimized when "back edge"
    // becomes "fall through" to default value of 2.
    UP.BEInsns = 2;
  }
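  // Illustrative effect (hypothetical subtarget numbers): with
  // LoopMicroOpBufferSize == 28 and no -partial-unrolling-threshold override,
  // a call-free loop gets Partial/Runtime/UpperBound unrolling enabled with
  // UP.PartialThreshold == 28; a single lowered call in the body disables all
  // of it.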

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) {
    PP.PeelCount = 0;
    PP.AllowPeeling = true;
    PP.AllowLoopNestsPeeling = false;
    PP.PeelProfiledIterations = true;
  }

  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) {
    return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  }

  bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                   AssumptionCache &AC, TargetLibraryInfo *TLI,
                                   DominatorTree *DT,
                                   const LoopAccessInfo *LAI) {
    return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
  }

  bool emitGetActiveLaneMask() {
    return BaseT::emitGetActiveLaneMask();
  }

  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) {
    return BaseT::instCombineIntrinsic(IC, II);
  }

  Optional<Value *> simplifyDemandedUseBitsIntrinsic(InstCombiner &IC,
                                                     IntrinsicInst &II,
                                                     APInt DemandedMask,
                                                     KnownBits &Known,
                                                     bool &KnownBitsComputed) {
    return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
  }

  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) {
    return BaseT::simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }

  InstructionCost getInstructionLatency(const Instruction *I) {
    if (isa<LoadInst>(I))
      return getST()->getSchedModel().DefaultLoadLatency;

    return BaseT::getInstructionLatency(I);
  }

  virtual Optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    return Optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  virtual Optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    Optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));

    if (TargetResult)
      return TargetResult;

    return BaseT::getCacheAssociativity(Level);
  }

  virtual unsigned getCacheLineSize() const {
    return getST()->getCacheLineSize();
  }

  virtual unsigned getPrefetchDistance() const {
    return getST()->getPrefetchDistance();
  }

  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }

  virtual unsigned getMaxPrefetchIterationsAhead() const {
    return getST()->getMaxPrefetchIterationsAhead();
  }

  virtual bool enableWritePrefetching() const {
    return getST()->enableWritePrefetching();
  }

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(32);
  }

  Optional<unsigned> getMaxVScale() const { return None; }

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(VectorType *InTy,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract) {
    /// FIXME: a bitfield is not a reasonable abstraction for talking about
    /// which elements are needed from a scalable vector
    auto *Ty = cast<FixedVectorType>(InTy);

    assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
           "Vector size mismatch");

    InstructionCost Cost = 0;

    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert)
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
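  // Illustrative tally (assuming unit insert/extract costs): for a <4 x i32>
  // with DemandedElts == 0b0101 and both Insert and Extract set, elements 0
  // and 2 each contribute one insert and one extract, for a total of 4.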

  /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
                                           bool Extract) {
    auto *Ty = cast<FixedVectorType>(InTy);

    APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements());
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
  }

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                   ArrayRef<Type *> Tys) {
    assert(Args.size() == Tys.size() && "Expected matching Args and Tys");

    InstructionCost Cost = 0;
    SmallPtrSet<const Value *, 4> UniqueOperands;
    for (int I = 0, E = Args.size(); I != E; I++) {
      // Disregard things like metadata arguments.
      const Value *A = Args[I];
      Type *Ty = Tys[I];
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;

      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        if (auto *VecTy = dyn_cast<VectorType>(Ty))
          Cost += getScalarizationOverhead(VecTy, false, true);
      }
    }

    return Cost;
  }

  /// Estimate the overhead of scalarizing the inputs and outputs of an
  /// instruction, with return type RetTy and arguments Args of type Tys. If
  /// Args are unknown (empty), then the cost associated with one argument is
  /// added as a heuristic.
  InstructionCost getScalarizationOverhead(VectorType *RetTy,
                                           ArrayRef<const Value *> Args,
                                           ArrayRef<Type *> Tys) {
    InstructionCost Cost = getScalarizationOverhead(RetTy, true, false);
    if (!Args.empty())
      Cost += getOperandsScalarizationOverhead(Args, Tys);
    else
      // When no information on arguments is provided, we add the cost
      // associated with one argument as a heuristic.
      Cost += getScalarizationOverhead(RetTy, false, true);

    return Cost;
  }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle more cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
                                           Opd1Info, Opd2Info,
                                           Opd1PropInfo, Opd2PropInfo,
                                           Args, CxtI);

    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    InstructionCost OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // An 'Expand' of URem and SRem is special because it may default
    // to expanding the operation into a sequence of sub-operations
    // i.e. X % Y -> X-(X/Y)*Y.
    if (ISD == ISD::UREM || ISD == ISD::SREM) {
      bool IsSigned = ISD == ISD::SREM;
      if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
                                        LT.second) ||
          TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
                                        LT.second)) {
        unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
        InstructionCost DivCost = thisT()->getArithmeticInstrCost(
            DivOpc, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo,
            Opd2PropInfo);
        InstructionCost MulCost =
            thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
        InstructionCost SubCost =
            thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
        return DivCost + MulCost + SubCost;
      }
    }

    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
      InstructionCost Cost = thisT()->getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
          Opd1PropInfo, Opd2PropInfo, Args, CxtI);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      SmallVector<Type *> Tys(Args.size(), Ty);
      return getScalarizationOverhead(VTy, Args, Tys) +
             VTy->getNumElements() * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
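  // Illustrative walk through the SRem path above (hypothetical unit costs):
  // if i32 SREM must be expanded but SDIV is legal, the estimate becomes
  // cost(sdiv) + cost(mul) + cost(sub) for X - (X / Y) * Y, i.e. 3 with all
  // three at cost 1, rather than a single opaque expansion cost.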

  TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
                                              ArrayRef<int> Mask) const {
    int Limit = Mask.size() * 2;
    if (Mask.empty() ||
        // Extra check required by isSingleSourceMaskImpl function (called by
        // ShuffleVectorInst::isSingleSourceMask).
        any_of(Mask, [Limit](int I) { return I >= Limit; }))
      return Kind;
    switch (Kind) {
    case TTI::SK_PermuteSingleSrc:
      if (ShuffleVectorInst::isReverseMask(Mask))
        return TTI::SK_Reverse;
      if (ShuffleVectorInst::isZeroEltSplatMask(Mask))
        return TTI::SK_Broadcast;
      break;
    case TTI::SK_PermuteTwoSrc:
      if (ShuffleVectorInst::isSelectMask(Mask))
        return TTI::SK_Select;
      if (ShuffleVectorInst::isTransposeMask(Mask))
        return TTI::SK_Transpose;
      break;
    case TTI::SK_Select:
    case TTI::SK_Reverse:
    case TTI::SK_Broadcast:
    case TTI::SK_Transpose:
    case TTI::SK_InsertSubvector:
    case TTI::SK_ExtractSubvector:
    case TTI::SK_Splice:
      break;
    }
    return Kind;
  }

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask, int Index,
                                 VectorType *SubTp) {

    switch (improveShuffleKindFromMask(Kind, Mask)) {
    case TTI::SK_Broadcast:
      return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp));
    case TTI::SK_Select:
    case TTI::SK_Splice:
    case TTI::SK_Reverse:
    case TTI::SK_Transpose:
    case TTI::SK_PermuteSingleSrc:
    case TTI::SK_PermuteTwoSrc:
      return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp));
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(Tp, Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(Tp, Index,
                                        cast<FixedVectorType>(SubTp));
    }
    llvm_unreachable("Unknown TTI::ShuffleKind");
  }

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr) {
    if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
      return 0;

    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<InstructionCost, MVT> SrcLT =
        TLI->getTypeLegalizationCost(DL, Src);
    std::pair<InstructionCost, MVT> DstLT =
        TLI->getTypeLegalizationCost(DL, Dst);

    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      // Check for NOOP conversions.
      if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
        return 0;
      LLVM_FALLTHROUGH;
    case Instruction::BitCast:
      // Bitcasts between types that are legalized to the same type are free,
      // and we assume int to/from ptr of the same size is also free.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      LLVM_FALLTHROUGH;
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;

      // If this is a zext/sext of a load, return 0 if the corresponding
      // extending load exists on target and the result type is legal.
      if (CCH == TTI::CastContextHint::Normal) {
        EVT ExtVT = EVT::getEVT(Dst);
        EVT LoadVT = EVT::getEVT(Src);
        unsigned LType =
            ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
        if (DstLT.first == SrcLT.first &&
            TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
          return 0;
      }
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }

    auto *SrcVTy = dyn_cast<VectorType>(Src);
    auto *DstVTy = dyn_cast<VectorType>(Dst);

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return SrcLT.first;

    // Handle scalar conversions.
    if (!SrcVTy && !DstVTy) {
      // Just check the op cost. If the operation is legal then assume it costs
      // 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // TLI->getTypeLegalizationCost().
      bool SplitSrc =
          TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
          TargetLowering::TypeSplitVector;
      bool SplitDst =
          TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
          TargetLowering::TypeSplitVector;
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
          DstVTy->getElementCount().isVector()) {
        Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
        Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
        T *TTI = static_cast<T *>(this);
        // If both types need to be split then the split is free.
        InstructionCost SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
                                          CostKind, I));
      }

      // Scalarization cost is Invalid, can't assume any num elements.
      if (isa<ScalableVectorType>(DstVTy))
        return InstructionCost::getInvalid();

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(DstVTy, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast) {
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (SrcVTy ? getScalarizationOverhead(SrcVTy, false, true) : 0) +
             (DstVTy ? getScalarizationOverhead(DstVTy, true, false) : 0);
    }

    llvm_unreachable("Unhandled cast");
  }
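  // Illustrative walk through the split path above: if both the source and
  // destination of an <8 x i32> -> <8 x i16> cast are legalized by splitting,
  // the estimate is twice the recursive <4 x i32> -> <4 x i16> cost with no
  // extra split charge, since SplitCost only applies when just one side
  // splits.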

  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy, unsigned Index) {
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       Index) +
           thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
                                     TTI::CastContextHint::None,
                                     TTI::TCK_RecipThroughput);
  }

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) {
    return BaseT::getCFInstrCost(Opcode, CostKind, I);
  }

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle other cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                       I);

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the cast is scalarized.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
      unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      InstructionCost Cost = thisT()->getCmpSelInstrCost(
          Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValVTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     unsigned Index) {
    std::pair<InstructionCost, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());

    return LT.first;
  }

  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
                                  MaybeAlign Alignment, unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr) {
    assert(!Src->isVoidTy() && "Invalid type");
    // Assume types, such as structs, are expensive.
    if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
      return 4;
    std::pair<InstructionCost, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Src);

    // Assume that all loads of legal types cost 1.
    InstructionCost Cost = LT.first;
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost;

    if (Src->isVectorTy() &&
        // In practice it's not currently possible to have a change in lane
        // length for extending loads or truncating stores so both types should
        // have the same scalable property.
        TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
                            LT.second.getSizeInBits())) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(cast<VectorType>(Src),
                                         Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
                                       CostKind);
  }

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                       true, CostKind);
  }
1175 
1177  unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1178  Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1179  bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1180  auto *VT = cast<FixedVectorType>(VecTy);
1181 
1182  unsigned NumElts = VT->getNumElements();
1183  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1184 
1185  unsigned NumSubElts = NumElts / Factor;
1186  auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1187 
1188  // Firstly, the cost of load/store operation.
1189  InstructionCost Cost;
1190  if (UseMaskForCond || UseMaskForGaps)
1191  Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1193  else
1194  Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1195  CostKind);
1196 
1197  // Legalize the vector type, and get the legalized and unlegalized type
1198  // sizes.
1199  MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
1200  unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1201  unsigned VecTyLTSize = VecTyLT.getStoreSize();
1202 
1203  // Scale the cost of the memory operation by the fraction of legalized
1204  // instructions that will actually be used. We shouldn't account for the
1205  // cost of dead instructions since they will be removed.
1206  //
1207  // E.g., An interleaved load of factor 8:
1208  // %vec = load <16 x i64>, <16 x i64>* %ptr
1209  // %v0 = shufflevector %vec, undef, <0, 8>
1210  //
1211  // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1212  // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1213  // type). The other loads are unused.
1214  //
1215  // We only scale the cost of loads since interleaved store groups aren't
1216  // allowed to have gaps.
1217  if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
1218  // The number of loads of a legal type it will take to represent a load
1219  // of the unlegalized vector type.
1220  unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1221 
1222  // The number of elements of the unlegalized type that correspond to a
1223  // single legal instruction.
1224  unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1225 
1226  // Determine which legal instructions will be used.
1227  BitVector UsedInsts(NumLegalInsts, false);
1228  for (unsigned Index : Indices)
1229  for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1230  UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1231 
1232  // Scale the cost of the load by the fraction of legal instructions that
1233  // will be used.
1234  Cost *= UsedInsts.count() / NumLegalInsts;
1235  }
1236 
1237  // Then plus the cost of interleave operation.
1238  if (Opcode == Instruction::Load) {
1239  // The interleave cost is similar to extract sub vectors' elements
1240  // from the wide vector, and insert them into sub vectors.
1241  //
1242  // E.g. An interleaved load of factor 2 (with one member of index 0):
1243  // %vec = load <8 x i32>, <8 x i32>* %ptr
1244  // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1245  // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1246  // <8 x i32> vector and insert them into a <4 x i32> vector.
1247 
1248  assert(Indices.size() <= Factor &&
1249  "Interleaved memory op has too many members");
1250 
1251  for (unsigned Index : Indices) {
1252  assert(Index < Factor && "Invalid index for interleaved memory op");
1253 
1254  // Extract elements from loaded vector for each sub vector.
1255  for (unsigned i = 0; i < NumSubElts; i++)
1256  Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VT,
1257  Index + i * Factor);
1258  }
1259 
1260  InstructionCost InsSubCost = 0;
1261  for (unsigned i = 0; i < NumSubElts; i++)
1262  InsSubCost +=
1263  thisT()->getVectorInstrCost(Instruction::InsertElement, SubVT, i);
1264 
1265  Cost += Indices.size() * InsSubCost;
1266  } else {
1267  // The interleave cost is extract all elements from sub vectors, and
1268  // insert them into the wide vector.
1269  //
1270  // E.g. An interleaved store of factor 2:
1271  // %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
1272  // store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
1273  // The cost is estimated as extract all elements from both <4 x i32>
1274  // vectors and insert into the <8 x i32> vector.
1275 
1276  InstructionCost ExtSubCost = 0;
1277  for (unsigned i = 0; i < NumSubElts; i++)
1278  ExtSubCost +=
1279  thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i);
1280  Cost += ExtSubCost * Factor;
1281 
1282  for (unsigned i = 0; i < NumElts; i++)
1283  Cost += static_cast<T *>(this)
1284  ->getVectorInstrCost(Instruction::InsertElement, VT, i);
1285  }
1286 
1287  if (!UseMaskForCond)
1288  return Cost;
1289 
1290  Type *I8Type = Type::getInt8Ty(VT->getContext());
1291  auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1292  SubVT = FixedVectorType::get(I8Type, NumSubElts);
1293 
1294  // The Mask shuffling cost is extract all the elements of the Mask
1295  // and insert each of them Factor times into the wide vector:
1296  //
1297  // E.g. an interleaved group with factor 3:
1298  // %mask = icmp ult <8 x i32> %vec1, %vec2
1299  // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1300  // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1301  // The cost is estimated as extract all mask elements from the <8xi1> mask
1302  // vector and insert them factor times into the <24xi1> shuffled mask
1303  // vector.
1304  for (unsigned i = 0; i < NumSubElts; i++)
1305  Cost +=
1306  thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i);
1307 
1308  for (unsigned i = 0; i < NumElts; i++)
1309  Cost +=
1310  thisT()->getVectorInstrCost(Instruction::InsertElement, MaskVT, i);
1311 
1312  // The Gaps mask is invariant and created outside the loop, therefore the
1313  // cost of creating it is not accounted for here. However if we have both
1314  // a MaskForGaps and some other mask that guards the execution of the
1315  // memory access, we need to account for the cost of And-ing the two masks
1316  // inside the loop.
1317  if (UseMaskForGaps)
1318  Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1319  CostKind);
1320 
1321  return Cost;
1322  }
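  // Illustrative tally (assuming unit insert/extract costs and a unit legal
  // load): a factor-2 interleaved load of <8 x i32> using both indices pays
  // the memory cost, plus 2 * 4 extracts from the wide vector, plus 2 * 4
  // inserts to build the two <4 x i32> sub vectors.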

  /// Get intrinsic cost based on arguments.
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) {
    // Check for generically free intrinsics.
    if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
      return 0;

    // Assume that target intrinsics are cheap.
    Intrinsic::ID IID = ICA.getID();
    if (Function::isTargetIntrinsic(IID))
      return TargetTransformInfo::TCC_Basic;

    if (ICA.isTypeBasedOnly())
      return getTypeBasedIntrinsicInstrCost(ICA, CostKind);

    Type *RetTy = ICA.getReturnType();

    ElementCount RetVF =
        (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
                             : ElementCount::getFixed(1));
    const IntrinsicInst *I = ICA.getInst();
    const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
    FastMathFlags FMF = ICA.getFlags();
    switch (IID) {
    default:
      break;

    case Intrinsic::cttz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      break;

    case Intrinsic::ctlz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      break;

    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());

    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Store,
                                             ICA.getArgTypes()[0], Args[1],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_stepvector: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      // The cost of materialising a constant integer vector.
      return TargetTransformInfo::TCC_Basic;
    }
    case Intrinsic::experimental_vector_extract: {
      // FIXME: Handle case where a scalable vector is extracted from a scalable
      // vector
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
      return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
                                     cast<VectorType>(Args[0]->getType()), None,
                                     Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::experimental_vector_insert: {
      // FIXME: Handle case where a scalable vector is inserted into a scalable
      // vector
      if (isa<ScalableVectorType>(Args[1]->getType()))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()), None,
          Index, cast<VectorType>(Args[1]->getType()));
    }
    case Intrinsic::experimental_vector_reverse: {
      return thisT()->getShuffleCost(TTI::SK_Reverse,
                                     cast<VectorType>(Args[0]->getType()), None,
                                     0, cast<VectorType>(RetTy));
    }
    case Intrinsic::experimental_vector_splice: {
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(TTI::SK_Splice,
                                     cast<VectorType>(Args[0]->getType()), None,
                                     Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
      IntrinsicCostAttributes Attrs(
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW;
      TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX);
      TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY);
      TTI::OperandValueKind OpKindZ = TTI::getOperandInfo(Z, OpPropsZ);
      TTI::OperandValueKind OpKindBW = TTI::OK_UniformConstantValue;
      OpPropsBW = isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
                                                              : TTI::OP_None;
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Shl, RetTy, CostKind, OpKindX, OpKindZ, OpPropsX);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::LShr, RetTy, CostKind, OpKindY, OpKindZ, OpPropsY);
      // Non-constant shift amounts require a modulo.
      if (OpKindZ != TTI::OK_UniformConstantValue &&
          OpKindZ != TTI::OK_NonUniformConstantValue)
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
                                                CostKind, OpKindZ, OpKindBW,
                                                OpPropsZ, OpPropsBW);
      // For non-rotates (X != Y) we must add shift-by-zero handling costs.
      if (X != Y) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost +=
            thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                        CmpInst::BAD_ICMP_PREDICATE, CostKind);
        Cost +=
            thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                        CmpInst::BAD_ICMP_PREDICATE, CostKind);
      }
      return Cost;
    }
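    // Illustrative tally for the expansion above (assuming unit costs): a
    // non-rotate funnel shift with a variable shift amount comes to
    // or + sub + shl + lshr + urem + icmp + select, i.e. roughly 7.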
    }

    // Assume that we need to scalarize this intrinsic.
    // Compute the scalarization overhead based on Args for a vector
    // intrinsic.
    InstructionCost ScalarizationCost = InstructionCost::getInvalid();
    if (RetVF.isVector() && !RetVF.isScalable()) {
      ScalarizationCost = 0;
      if (!RetTy->isVoidTy())
        ScalarizationCost +=
            getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
      ScalarizationCost +=
          getOperandsScalarizationOverhead(Args, ICA.getArgTypes());
    }

    IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
                                  ScalarizationCost);
    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  }

  /// Get intrinsic cost based on argument types.
  /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
  /// cost of scalarizing the arguments and the return value will be computed
  /// based on types.
  InstructionCost
  getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind) {
    Intrinsic::ID IID = ICA.getID();
    Type *RetTy = ICA.getReturnType();
    const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
    FastMathFlags FMF = ICA.getFlags();
    InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
    bool SkipScalarizationCost = ICA.skipScalarizationCost();

    VectorType *VecOpTy = nullptr;
    if (!Tys.empty()) {
      // The vector reduction operand is operand 0 except for fadd/fmul.
      // Their operand 0 is a scalar start value, so the vector op is operand 1.
      unsigned VecTyIndex = 0;
      if (IID == Intrinsic::vector_reduce_fadd ||
          IID == Intrinsic::vector_reduce_fmul)
        VecTyIndex = 1;
      assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
      VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
    }

    // Library call cost - other than size, make it expensive.
    unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
    SmallVector<unsigned, 2> ISDs;
    switch (IID) {
    default: {
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      // Assume that we need to scalarize this intrinsic.
      InstructionCost ScalarizationCost =
          SkipScalarizationCost ? ScalarizationCostPassed : 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
        if (!SkipScalarizationCost)
          ScalarizationCost = getScalarizationOverhead(RetVTy, true, false);
        ScalarCalls = std::max(ScalarCalls,
                               cast<FixedVectorType>(RetVTy)->getNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!SkipScalarizationCost)
            ScalarizationCost += getScalarizationOverhead(VTy, false, true);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISDs.push_back(ISD::FSQRT);
      break;
    case Intrinsic::sin:
      ISDs.push_back(ISD::FSIN);
      break;
    case Intrinsic::cos:
      ISDs.push_back(ISD::FCOS);
      break;
    case Intrinsic::exp:
      ISDs.push_back(ISD::FEXP);
      break;
    case Intrinsic::exp2:
      ISDs.push_back(ISD::FEXP2);
      break;
    case Intrinsic::log:
      ISDs.push_back(ISD::FLOG);
      break;
    case Intrinsic::log10:
      ISDs.push_back(ISD::FLOG10);
      break;
    case Intrinsic::log2:
      ISDs.push_back(ISD::FLOG2);
      break;
    case Intrinsic::fabs:
      ISDs.push_back(ISD::FABS);
      break;
    case Intrinsic::canonicalize:
      ISDs.push_back(ISD::FCANONICALIZE);
      break;
    case Intrinsic::minnum:
      ISDs.push_back(ISD::FMINNUM);
      break;
    case Intrinsic::maxnum:
      ISDs.push_back(ISD::FMAXNUM);
      break;
    case Intrinsic::minimum:
      ISDs.push_back(ISD::FMINIMUM);
      break;
    case Intrinsic::maximum:
      ISDs.push_back(ISD::FMAXIMUM);
      break;
    case Intrinsic::copysign:
      ISDs.push_back(ISD::FCOPYSIGN);
      break;
    case Intrinsic::floor:
      ISDs.push_back(ISD::FFLOOR);
      break;
    case Intrinsic::ceil:
      ISDs.push_back(ISD::FCEIL);
      break;
    case Intrinsic::trunc:
      ISDs.push_back(ISD::FTRUNC);
      break;
    case Intrinsic::nearbyint:
      ISDs.push_back(ISD::FNEARBYINT);
      break;
    case Intrinsic::rint:
      ISDs.push_back(ISD::FRINT);
      break;
    case Intrinsic::round:
      ISDs.push_back(ISD::FROUND);
      break;
    case Intrinsic::roundeven:
      ISDs.push_back(ISD::FROUNDEVEN);
      break;
    case Intrinsic::pow:
      ISDs.push_back(ISD::FPOW);
      break;
    case Intrinsic::fma:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::fmuladd:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISDs.push_back(ISD::STRICT_FMA);
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      Type *Ty = Tys[0];
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::masked_load: {
      Type *Ty = RetTy;
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::vector_reduce_add:
      return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
                                                 None, CostKind);
    case Intrinsic::vector_reduce_mul:
      return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
                                                 None, CostKind);
    case Intrinsic::vector_reduce_and:
      return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
                                                 None, CostKind);
    case Intrinsic::vector_reduce_or:
      return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy, None,
                                                 CostKind);
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
                                                 None, CostKind);
    case Intrinsic::vector_reduce_fadd:
      return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
                                                 FMF, CostKind);
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
                                                 FMF, CostKind);
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      return thisT()->getMinMaxReductionCost(
          VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
          /*IsUnsigned=*/false, CostKind);
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
      return thisT()->getMinMaxReductionCost(
          VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
          /*IsUnsigned=*/true, CostKind);
1703  case Intrinsic::abs:
1704  case Intrinsic::smax:
1705  case Intrinsic::smin:
1706  case Intrinsic::umax:
1707  case Intrinsic::umin: {
1708  // abs(X) = select(icmp(X,0),X,sub(0,X))
1709  // minmax(X,Y) = select(icmp(X,Y),X,Y)
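// For example, smax(i32 3, i32 7) is modelled as select(icmp sgt(3, 7), 3, 7)
// = 7, and abs(i32 -5) as select(icmp sgt(-5, 0), -5, sub(0, -5)) = 5.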
1710  Type *CondTy = RetTy->getWithNewBitWidth(1);
1711  InstructionCost Cost = 0;
1712  // TODO: Ideally getCmpSelInstrCost would accept an icmp condition code.
1713  Cost +=
1714  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1715  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1716  Cost +=
1717  thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1718  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1719  // TODO: Should we add an OperandValueProperties::OP_Zero property?
1720  if (IID == Intrinsic::abs)
1721  Cost += thisT()->getArithmeticInstrCost(
1722  BinaryOperator::Sub, RetTy, CostKind, TTI::OK_UniformConstantValue);
1723  return Cost;
1724  }
1725  case Intrinsic::sadd_sat:
1726  case Intrinsic::ssub_sat: {
1727  Type *CondTy = RetTy->getWithNewBitWidth(1);
1728 
1729  Type *OpTy = StructType::create({RetTy, CondTy});
1730  Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
1731  ? Intrinsic::sadd_with_overflow
1732  : Intrinsic::ssub_with_overflow;
1733 
1734  // SatMax -> Overflow && SumDiff < 0
1735  // SatMin -> Overflow && SumDiff >= 0
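// Worked example (illustrative): sadd_sat(i8 100, i8 50) wraps to
// SumDiff = -106; Overflow is set and SumDiff < 0, so the result saturates
// to SatMax (127).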
1736  InstructionCost Cost = 0;
1737  IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
1738  nullptr, ScalarizationCostPassed);
1739  Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
1740  Cost +=
1741  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1742  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1743  Cost += 2 * thisT()->getCmpSelInstrCost(
1744  BinaryOperator::Select, RetTy, CondTy,
1745  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1746  return Cost;
1747  }
1748  case Intrinsic::uadd_sat:
1749  case Intrinsic::usub_sat: {
1750  Type *CondTy = RetTy->getWithNewBitWidth(1);
1751 
1752  Type *OpTy = StructType::create({RetTy, CondTy});
1753  Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
1754  ? Intrinsic::uadd_with_overflow
1755  : Intrinsic::usub_with_overflow;
1756 
1757  InstructionCost Cost = 0;
1758  IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
1759  nullptr, ScalarizationCostPassed);
1760  Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
1761  Cost +=
1762  thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1763  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1764  return Cost;
1765  }
1766  case Intrinsic::smul_fix:
1767  case Intrinsic::umul_fix: {
1768  unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
1769  Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
1770 
1771  unsigned ExtOp =
1772  IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
1773  TTI::CastContextHint CCH = TTI::CastContextHint::None;
1774 
1775  InstructionCost Cost = 0;
1776  Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
1777  Cost +=
1778  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
1779  Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
1780  CCH, CostKind);
1781  Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
1782  CostKind, TTI::OK_AnyValue,
1783  TTI::OK_UniformConstantValue);
1784  Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
1785  TTI::OK_AnyValue,
1786  TTI::OK_UniformConstantValue);
1787  Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
1788  return Cost;
1789  }
1790  case Intrinsic::sadd_with_overflow:
1791  case Intrinsic::ssub_with_overflow: {
1792  Type *SumTy = RetTy->getContainedType(0);
1793  Type *OverflowTy = RetTy->getContainedType(1);
1794  unsigned Opcode = IID == Intrinsic::sadd_with_overflow
1795  ? BinaryOperator::Add
1796  : BinaryOperator::Sub;
1797 
1798  // LHSSign -> LHS >= 0
1799  // RHSSign -> RHS >= 0
1800  // SumSign -> Sum >= 0
1801  //
1802  // Add:
1803  // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
1804  // Sub:
1805  // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
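// Worked example (illustrative): for i8 add 100 + 50, both operands are
// non-negative (LHSSign == RHSSign) but the wrapped sum -106 is negative
// (LHSSign != SumSign), so Overflow is set.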
1806  InstructionCost Cost = 0;
1807  Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
1808  Cost += 3 * thisT()->getCmpSelInstrCost(
1809  Instruction::ICmp, SumTy, OverflowTy,
1810  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1811  Cost += 2 * thisT()->getCmpSelInstrCost(
1812  Instruction::Select, OverflowTy, OverflowTy,
1813  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1814  Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, OverflowTy,
1815  CostKind);
1816  return Cost;
1817  }
1818  case Intrinsic::uadd_with_overflow:
1819  case Intrinsic::usub_with_overflow: {
1820  Type *SumTy = RetTy->getContainedType(0);
1821  Type *OverflowTy = RetTy->getContainedType(1);
1822  unsigned Opcode = IID == Intrinsic::uadd_with_overflow
1823  ? BinaryOperator::Add
1824  : BinaryOperator::Sub;
1825 
1826  InstructionCost Cost = 0;
1827  Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
1828  Cost +=
1829  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
1830  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1831  return Cost;
1832  }
1833  case Intrinsic::smul_with_overflow:
1834  case Intrinsic::umul_with_overflow: {
1835  Type *MulTy = RetTy->getContainedType(0);
1836  Type *OverflowTy = RetTy->getContainedType(1);
1837  unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
1838  Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
1839 
1840  unsigned ExtOp =
1841  IID == Intrinsic::smul_with_overflow ? Instruction::SExt : Instruction::ZExt;
1842  TTI::CastContextHint CCH = TTI::CastContextHint::None;
1843 
1844  InstructionCost Cost = 0;
1845  Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
1846  Cost +=
1847  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
1848  Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
1849  CCH, CostKind);
1850  Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, MulTy,
1851  CostKind, TTI::OK_AnyValue,
1852  TTI::OK_UniformConstantValue);
1853 
1854  if (IID == Intrinsic::smul_with_overflow)
1855  Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
1856  CostKind, TTI::OK_AnyValue,
1857  TTI::OK_UniformConstantValue);
1858 
1859  Cost +=
1860  thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, MulTy, OverflowTy,
1861  CmpInst::BAD_ICMP_PREDICATE, CostKind);
1862  return Cost;
1863  }
1864  case Intrinsic::ctpop:
1865  ISDs.push_back(ISD::CTPOP);
1866  // In case of legalization use TCC_Expensive. This is cheaper than a
1867  // library call but still not a cheap instruction.
1868  SingleCallCost = TargetTransformInfo::TCC_Expensive;
1869  break;
1870  case Intrinsic::ctlz:
1871  ISDs.push_back(ISD::CTLZ);
1872  break;
1873  case Intrinsic::cttz:
1874  ISDs.push_back(ISD::CTTZ);
1875  break;
1876  case Intrinsic::bswap:
1877  ISDs.push_back(ISD::BSWAP);
1878  break;
1879  case Intrinsic::bitreverse:
1880  ISDs.push_back(ISD::BITREVERSE);
1881  break;
1882  }
1883 
1884  const TargetLoweringBase *TLI = getTLI();
1885  std::pair<InstructionCost, MVT> LT =
1886  TLI->getTypeLegalizationCost(DL, RetTy);
1887 
1888  SmallVector<InstructionCost, 2> LegalCost;
1889  SmallVector<InstructionCost, 2> CustomCost;
1890  for (unsigned ISD : ISDs) {
1891  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1892  if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
1893  TLI->isFAbsFree(LT.second)) {
1894  return 0;
1895  }
1896 
1897  // The operation is legal. Assume it costs 1.
1898  // If the type is split to multiple registers, assume that there is some
1899  // overhead to this.
1900  // TODO: Once we have extract/insert subvector cost we need to use them.
1901  if (LT.first > 1)
1902  LegalCost.push_back(LT.first * 2);
1903  else
1904  LegalCost.push_back(LT.first * 1);
1905  } else if (!TLI->isOperationExpand(ISD, LT.second)) {
1906  // If the operation is custom lowered then assume
1907  // that the code is twice as expensive.
1908  CustomCost.push_back(LT.first * 2);
1909  }
1910  }
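// Illustrative numbers, assuming a hypothetical target with 128-bit vector
// registers: an fsqrt on <8 x float> would legalize to two v4f32 operations
// (LT.first == 2), so the legal path above records a cost of LT.first * 2 == 4.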
1911 
1912  auto *MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
1913  if (MinLegalCostI != LegalCost.end())
1914  return *MinLegalCostI;
1915 
1916  auto MinCustomCostI =
1917  std::min_element(CustomCost.begin(), CustomCost.end());
1918  if (MinCustomCostI != CustomCost.end())
1919  return *MinCustomCostI;
1920 
1921  // If we can't lower fmuladd into an FMA, estimate the cost as a floating
1922  // point mul followed by an add.
1923  if (IID == Intrinsic::fmuladd)
1924  return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
1925  CostKind) +
1926  thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
1927  CostKind);
1928  if (IID == Intrinsic::experimental_constrained_fmuladd) {
1929  IntrinsicCostAttributes FMulAttrs(
1930  Intrinsic::experimental_constrained_fmul, RetTy, Tys);
1931  IntrinsicCostAttributes FAddAttrs(
1932  Intrinsic::experimental_constrained_fadd, RetTy, Tys);
1933  return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
1934  thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
1935  }
1936 
1937  // Else, assume that we need to scalarize this intrinsic. For math builtins
1938  // this will emit a costly libcall, adding call overhead and spills. Make it
1939  // very expensive.
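// For example, a <4 x float> llvm.sin with no vector lowering is modelled
// below as four scalar sin calls (ScalarCalls * ScalarCost) plus the lane
// extract/insert overhead accumulated in ScalarizationCost.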
1940  if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1941  // Scalable vectors cannot be scalarized, so return Invalid.
1942  if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1943  return isa<ScalableVectorType>(Ty);
1944  }))
1945  return InstructionCost::getInvalid();
1946 
1947  InstructionCost ScalarizationCost =
1948  SkipScalarizationCost ? ScalarizationCostPassed
1949  : getScalarizationOverhead(RetVTy, true, false);
1950 
1951  unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
1952  SmallVector<Type *, 4> ScalarTys;
1953  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1954  Type *Ty = Tys[i];
1955  if (Ty->isVectorTy())
1956  Ty = Ty->getScalarType();
1957  ScalarTys.push_back(Ty);
1958  }
1959  IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
1960  InstructionCost ScalarCost =
1961  thisT()->getIntrinsicInstrCost(Attrs, CostKind);
1962  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1963  if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
1964  if (!ICA.skipScalarizationCost())
1965  ScalarizationCost += getScalarizationOverhead(VTy, false, true);
1966  ScalarCalls = std::max(ScalarCalls,
1967  cast<FixedVectorType>(VTy)->getNumElements());
1968  }
1969  }
1970  return ScalarCalls * ScalarCost + ScalarizationCost;
1971  }
1972 
1973  // This is going to be turned into a library call, make it expensive.
1974  return SingleCallCost;
1975  }
1976 
1977  /// Compute a cost of the given call instruction.
1978  ///
1979  /// Compute the cost of calling function F with return type RetTy and
1980  /// argument types Tys. F might be nullptr, in this case the cost of an
1981  /// arbitrary call with the specified signature will be returned.
1982  /// This is used, for instance, when we estimate call of a vector
1983  /// counterpart of the given function.
1984  /// \param F Called function, might be nullptr.
1985  /// \param RetTy Return value types.
1986  /// \param Tys Argument types.
1987  /// \returns The cost of Call instruction.
1988  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
1989  ArrayRef<Type *> Tys,
1990  TTI::TargetCostKind CostKind) {
1991  return 10;
1992  }
1993 
1994  unsigned getNumberOfParts(Type *Tp) {
1995  std::pair<InstructionCost, MVT> LT =
1996  getTLI()->getTypeLegalizationCost(DL, Tp);
1997  return *LT.first.getValue();
1998  }
1999 
2000  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
2001  const SCEV *) {
2002  return 0;
2003  }
2004 
2005  /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
2006  /// We're assuming that reduction operation are performing the following way:
2007  ///
2008  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
2009  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
2010  /// \----------------v-------------/ \----------v------------/
2011  /// n/2 elements n/2 elements
2012  /// %red1 = op <n x t> %val, <n x t> val1
2013  /// After this operation we have a vector %red1 where only the first n/2
2014  /// elements are meaningful, the second n/2 elements are undefined and can be
2015  /// dropped. All other operations are actually working with the vector of
2016  /// length n/2, not n, though the real vector length is still n.
2017  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
2018  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
2019  /// \----------------v-------------/ \----------v------------/
2020  /// n/4 elements 3*n/4 elements
2021  /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
2022  /// length n/2, the resulting vector has length n/4 etc.
2023  ///
2024  /// The cost model should take into account that the actual length of the
2025  /// vector is reduced on each iteration.
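/// For example (ignoring type legalization), an add reduction of <8 x i32>
/// under this model costs three shuffle + add levels plus one final
/// extractelement of lane 0.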
2026  InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
2027  TTI::TargetCostKind CostKind) {
2028  Type *ScalarTy = Ty->getElementType();
2029  unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2030  if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
2031  ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
2032  NumVecElts >= 2) {
2033  // Or reduction for i1 is represented as:
2034  // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2035  // %res = cmp ne iReduxWidth %val, 0
2036  // And reduction for i1 is represented as:
2037  // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2038  // %res = cmp eq iReduxWidth %val, -1 (an all-ones value)
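// e.g. an And reduction of <8 x i1> becomes a bitcast to i8 followed by
// icmp eq i8 %val, 0xFF (illustrative).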
2039  Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
2040  return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
2041  TTI::CastContextHint::None, CostKind) +
2042  thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
2043  CmpInst::makeCmpResultType(ValTy),
2044  CmpInst::BAD_ICMP_PREDICATE, CostKind);
2045  }
2046  unsigned NumReduxLevels = Log2_32(NumVecElts);
2047  InstructionCost ArithCost = 0;
2048  InstructionCost ShuffleCost = 0;
2049  std::pair<InstructionCost, MVT> LT =
2050  thisT()->getTLI()->getTypeLegalizationCost(DL, Ty);
2051  unsigned LongVectorCount = 0;
2052  unsigned MVTLen =
2053  LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2054  while (NumVecElts > MVTLen) {
2055  NumVecElts /= 2;
2056  VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2057  ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, None,
2058  NumVecElts, SubTy);
2059  ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
2060  Ty = SubTy;
2061  ++LongVectorCount;
2062  }
2063 
2064  NumReduxLevels -= LongVectorCount;
2065 
2066  // The minimal length of the vector is limited by the real length of vector
2067  // operations performed on the current platform. That's why several final
2068  // reduction operations are performed on the vectors with the same
2069  // architecture-dependent length.
2070 
2071  // By default reductions need one shuffle per reduction level.
2072  ShuffleCost += NumReduxLevels * thisT()->getShuffleCost(
2073  TTI::SK_PermuteSingleSrc, Ty, None, 0, Ty);
2074  ArithCost += NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty);
2075  return ShuffleCost + ArithCost +
2076  thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
2077  }
2078 
2079  /// Try to calculate the cost of performing strict (in-order) reductions,
2080  /// which involves doing a sequence of floating point additions in lane
2081  /// order, starting with an initial value. For example, consider a scalar
2082  /// initial value 'InitVal' of type float and a vector of type <4 x float>:
2083  ///
2084  /// Vector = <float %v0, float %v1, float %v2, float %v3>
2085  ///
2086  /// %add1 = %InitVal + %v0
2087  /// %add2 = %add1 + %v1
2088  /// %add3 = %add2 + %v2
2089  /// %add4 = %add3 + %v3
2090  ///
2091  /// As a simple estimate we can say the cost of such a reduction is 4 times
2092  /// the cost of a scalar FP addition. We can only estimate the costs for
2093  /// fixed-width vectors here because for scalable vectors we do not know the
2094  /// runtime number of operations.
2095  InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
2096  TTI::TargetCostKind CostKind) {
2097  // Targets must implement a default value for the scalable case, since
2098  // we don't know how many lanes the vector has.
2099  if (isa<ScalableVectorType>(Ty))
2100  return InstructionCost::getInvalid();
2101 
2102  auto *VTy = cast<FixedVectorType>(Ty);
2103  InstructionCost ExtractCost =
2104  getScalarizationOverhead(VTy, /*Insert=*/false, /*Extract=*/true);
2105  InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
2106  Opcode, VTy->getElementType(), CostKind);
2107  ArithCost *= VTy->getNumElements();
2108 
2109  return ExtractCost + ArithCost;
2110  }
2111 
2112  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2113  Optional<FastMathFlags> FMF,
2114  TTI::TargetCostKind CostKind) {
2115  if (TTI::requiresOrderedReduction(FMF))
2116  return getOrderedReductionCost(Opcode, Ty, CostKind);
2117  return getTreeReductionCost(Opcode, Ty, CostKind);
2118  }
2119 
2120  /// Try to calculate op costs for min/max reduction operations.
2121  /// \param CondTy Conditional type for the Select instruction.
2122  InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
2123  bool IsUnsigned,
2124  TTI::TargetCostKind CostKind) {
2125  Type *ScalarTy = Ty->getElementType();
2126  Type *ScalarCondTy = CondTy->getElementType();
2127  unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2128  unsigned NumReduxLevels = Log2_32(NumVecElts);
2129  unsigned CmpOpcode;
2130  if (Ty->isFPOrFPVectorTy()) {
2131  CmpOpcode = Instruction::FCmp;
2132  } else {
2133  assert(Ty->isIntOrIntVectorTy() &&
2134  "expecting floating point or integer type for min/max reduction");
2135  CmpOpcode = Instruction::ICmp;
2136  }
2137  InstructionCost MinMaxCost = 0;
2138  InstructionCost ShuffleCost = 0;
2139  std::pair<InstructionCost, MVT> LT =
2140  thisT()->getTLI()->getTypeLegalizationCost(DL, Ty);
2141  unsigned LongVectorCount = 0;
2142  unsigned MVTLen =
2143  LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2144  while (NumVecElts > MVTLen) {
2145  NumVecElts /= 2;
2146  auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2147  CondTy = FixedVectorType::get(ScalarCondTy, NumVecElts);
2148 
2149  ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, None,
2150  NumVecElts, SubTy);
2151  MinMaxCost +=
2152  thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy,
2153  CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2154  thisT()->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
2155  CmpInst::BAD_ICMP_PREDICATE, CostKind);
2156  Ty = SubTy;
2157  ++LongVectorCount;
2158  }
2159 
2160  NumReduxLevels -= LongVectorCount;
2161 
2162  // The minimal length of the vector is limited by the real length of vector
2163  // operations performed on the current platform. That's why several final
2164  // reduction operations are performed on the vectors with the same
2165  // architecture-dependent length.
2166  ShuffleCost += NumReduxLevels * thisT()->getShuffleCost(
2167  TTI::SK_PermuteSingleSrc, Ty, None, 0, Ty);
2168  MinMaxCost +=
2169  NumReduxLevels *
2170  (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy,
2171  CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2172  thisT()->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
2173  CmpInst::BAD_ICMP_PREDICATE, CostKind));
2174  // The last min/max should be in vector registers and we counted it above.
2175  // So just need a single extractelement.
2176  return ShuffleCost + MinMaxCost +
2177  thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
2178  }
2179 
2180  InstructionCost getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
2181  Type *ResTy, VectorType *Ty,
2182  TTI::TargetCostKind CostKind) {
2183  // Without any native support, this is equivalent to the cost of
2184  // vecreduce.add(ext) or if IsMLA vecreduce.add(mul(ext, ext))
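// For example, an i64 extending add reduction of <4 x i32> is costed as an
// extend of the input to <4 x i64>, an optional <4 x i64> multiply for the
// MLA form, and a <4 x i64> vecreduce.add.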
2185  VectorType *ExtTy = VectorType::get(ResTy, Ty);
2186  InstructionCost RedCost = thisT()->getArithmeticReductionCost(
2187  Instruction::Add, ExtTy, None, CostKind);
2188  InstructionCost MulCost = 0;
2189  InstructionCost ExtCost = thisT()->getCastInstrCost(
2190  IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2191  TTI::CastContextHint::None, CostKind);
2192  if (IsMLA) {
2193  MulCost =
2194  thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2195  ExtCost *= 2;
2196  }
2197 
2198  return RedCost + MulCost + ExtCost;
2199  }
2200 
2201  InstructionCost getVectorSplitCost() { return 1; }
2202 
2203  /// @}
2204 };
2205 
2206 /// Concrete BasicTTIImpl that can be used if no further customization
2207 /// is needed.
2208 class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
2209  using BaseT = BasicTTIImplBase<BasicTTIImpl>;
2210 
2211  friend BaseT;
2212 
2213  const TargetSubtargetInfo *ST;
2214  const TargetLoweringBase *TLI;
2215 
2216  const TargetSubtargetInfo *getST() const { return ST; }
2217  const TargetLoweringBase *getTLI() const { return TLI; }
2218 
2219 public:
2220  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
2221 };
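// A minimal usage sketch (illustrative; "MyTargetMachine" is hypothetical and
// the exact override signature varies between LLVM versions): a target with
// no TTI customization can return this implementation directly:
//
//   TargetTransformInfo
//   MyTargetMachine::getTargetTransformInfo(const Function &F) {
//     return TargetTransformInfo(BasicTTIImpl(this, F));
//   }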
2222 
2223 } // end namespace llvm
2224 
2225 #endif // LLVM_CODEGEN_BASICTTIIMPL_H
llvm::ShuffleVectorInst::isZeroEltSplatMask
static bool isZeroEltSplatMask(ArrayRef< int > Mask)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
Definition: Instructions.cpp:2174
llvm::MCSubtargetInfo::enableWritePrefetching
virtual bool enableWritePrefetching() const
Definition: MCSubtargetInfo.cpp:359
llvm::ISD::FROUNDEVEN
@ FROUNDEVEN
Definition: ISDOpcodes.h:884
i
i
Definition: README.txt:29
llvm::InstructionCost
Definition: InstructionCost.h:29
llvm::BasicTTIImplBase::getVectorSplitCost
InstructionCost getVectorSplitCost()
Definition: BasicTTIImpl.h:2201
llvm::BasicTTIImplBase::getFPOpCost
InstructionCost getFPOpCost(Type *Ty)
Definition: BasicTTIImpl.h:472
ValueTypes.h
llvm::TargetTransformInfo::UnrollingPreferences::BEInsns
unsigned BEInsns
Definition: TargetTransformInfo.h:474
llvm::TargetTransformInfo::UnrollingPreferences::PartialOptSizeThreshold
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
Definition: TargetTransformInfo.h:453
llvm::TargetTransformInfo::SK_Select
@ SK_Select
Selects elements from the corresponding lane of either source operand.
Definition: TargetTransformInfo.h:855
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::TargetTransformInfo::UnrollingPreferences::Runtime
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
Definition: TargetTransformInfo.h:481
llvm::TargetTransformInfo::TargetCostKind
TargetCostKind
The kind of cost model.
Definition: TargetTransformInfo.h:211
llvm::MVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: MachineValueType.h:1072
llvm::BasicTTIImplBase::getOperandsScalarizationOverhead
InstructionCost getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys)
Estimate the overhead of scalarizing an instructions unique non-constant operands.
Definition: BasicTTIImpl.h:707
llvm::TargetTransformInfoImplBase::isHardwareLoopProfitable
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
Definition: TargetTransformInfoImpl.h:152
llvm::TargetTransformInfo::TCC_Expensive
@ TCC_Expensive
The cost of a 'div' instruction on x86.
Definition: TargetTransformInfo.h:264
llvm::ISD::MemIndexedMode
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1304
llvm::BasicTTIImplBase::shouldBuildRelLookupTables
bool shouldBuildRelLookupTables() const
Definition: BasicTTIImpl.h:436
MathExtras.h
llvm::TargetTransformInfo::UnrollingPreferences::PartialThreshold
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
Definition: TargetTransformInfo.h:449
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
llvm::TargetLoweringBase
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
Definition: TargetLowering.h:191
llvm::Type::getInt1Ty
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:200
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::BasicTTIImplBase::getCacheAssociativity
virtual Optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:623
llvm::TargetLoweringBase::Legal
@ Legal
Definition: TargetLowering.h:196
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::TargetTransformInfoImplBase::preferPredicateOverEpilogue
bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *TLI, DominatorTree *DT, const LoopAccessInfo *LAI) const
Definition: TargetTransformInfoImpl.h:158
llvm::BasicTTIImplBase::getInliningThresholdMultiplier
unsigned getInliningThresholdMultiplier()
Definition: BasicTTIImpl.h:482
llvm::BasicTTIImplBase::isTruncateFree
bool isTruncateFree(Type *Ty1, Type *Ty2)
Definition: BasicTTIImpl.h:345
llvm::BasicTTIImplBase::isAlwaysUniform
bool isAlwaysUniform(const Value *V)
Definition: BasicTTIImpl.h:266
llvm::TargetTransformInfo::MemIndexedMode
MemIndexedMode
The type of load/store indexing.
Definition: TargetTransformInfo.h:1279
Insert
Vector Rotate Left Mask Mask Insert
Definition: README_P9.txt:112
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:720
llvm::ISD::BR_JT
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:946
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:228
ceil
We have fiadd patterns now but the followings have the same cost and complexity We need a way to specify the later is more profitable def def The FP stackifier should handle simple permutates to reduce number of shuffle e g ceil
Definition: README-FPStack.txt:54
llvm::ElementCount
Definition: TypeSize.h:386
llvm::BasicTTIImplBase::isLegalICmpImmediate
bool isLegalICmpImmediate(int64_t imm)
Definition: BasicTTIImpl.h:295
llvm::ISD::FMINNUM
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:898
llvm::BasicTTIImplBase::isNoopAddrSpaceCast
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:278
llvm::MCSubtargetInfo::getSchedModel
const MCSchedModel & getSchedModel() const
Get the machine model for this subtarget's CPU.
Definition: MCSubtargetInfo.h:162
llvm::Function
Definition: Function.h:61
llvm::Loop
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:530
llvm::ISD::PRE_DEC
@ PRE_DEC
Definition: ISDOpcodes.h:1304
llvm::BitVector::set
BitVector & set()
Definition: BitVector.h:343
llvm::ISD::BSWAP
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:666
llvm::ISD::UDIV
@ UDIV
Definition: ISDOpcodes.h:243
llvm::BasicTTIImplBase::getCFInstrCost
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
Definition: BasicTTIImpl.h:1055
llvm::BasicTTIImplBase::isTypeLegal
bool isTypeLegal(Type *Ty)
Definition: BasicTTIImpl.h:355
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:693
minimum
Should compile r2 movcc movcs str strb mov lr r1 movcs movcc mov lr r1 str mov mov cmp r1 movlo r2 str bx lr r0 mov mov cmp r0 movhs r2 mov r1 bx lr Some of the NEON intrinsics may be appropriate for more general either as target independent intrinsics or perhaps elsewhere in the ARM backend Some of them may also be lowered to target independent and perhaps some new SDNodes could be added For minimum
Definition: README.txt:489
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::Type::getScalarType
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:319
llvm::CodeModel::Medium
@ Medium
Definition: CodeGen.h:28
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::TargetTransformInfo::PeelingPreferences::AllowPeeling
bool AllowPeeling
Allow peeling off loop iterations.
Definition: TargetTransformInfo.h:537
llvm::TargetTransformInfoImplBase::isLSRCostLess
bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) const
Definition: TargetTransformInfoImpl.h:208
llvm::BasicTTIImplBase::instCombineIntrinsic
Optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II)
Definition: BasicTTIImpl.h:585
llvm::BasicTTIImplBase::getArithmeticInstrCost
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueKind Opd1Info=TTI::OK_AnyValue, TTI::OperandValueKind Opd2Info=TTI::OK_AnyValue, TTI::OperandValueProperties Opd1PropInfo=TTI::OP_None, TTI::OperandValueProperties Opd2PropInfo=TTI::OP_None, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
Definition: BasicTTIImpl.h:750
ErrorHandling.h
llvm::CmpInst::makeCmpResultType
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1031
llvm::TargetTransformInfo
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Definition: TargetTransformInfo.h:168
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:733
llvm::Loop::getStartLoc
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:633
llvm::IntrinsicCostAttributes::getReturnType
Type * getReturnType() const
Definition: TargetTransformInfo.h:150
llvm::BasicTTIImplBase::BasicTTIImplBase
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
Definition: BasicTTIImpl.h:243
llvm::BasicTTIImplBase::allowsMisalignedMemoryAccesses
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, bool *Fast) const
Definition: BasicTTIImpl.h:252
llvm::ScalarEvolution
The main scalar evolution driver.
Definition: ScalarEvolution.h:443
llvm::IntrinsicCostAttributes::getInst
const IntrinsicInst * getInst() const
Definition: TargetTransformInfo.h:149
llvm::ISD::FLOG2
@ FLOG2
Definition: ISDOpcodes.h:875
OptimizationRemarkEmitter.h
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:45
llvm::ISD::FMA
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:466
llvm::Type::isFPOrFPVectorTy
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:189
llvm::TargetTransformInfo::TCK_CodeSize
@ TCK_CodeSize
Instruction code size.
Definition: TargetTransformInfo.h:214
llvm::TargetLoweringBase::isLegalICmpImmediate
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
Definition: TargetLowering.h:2390
APInt.h
llvm::BasicTTIImplBase::isFCmpOrdCheaperThanFCmpZero
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty)
Definition: BasicTTIImpl.h:468
llvm::BasicTTIImplBase::~BasicTTIImplBase
virtual ~BasicTTIImplBase()=default
llvm::TargetTransformInfo::UnrollingPreferences::Partial
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
Definition: TargetTransformInfo.h:477
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::TargetLoweringBase::getTruncStoreAction
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
Definition: TargetLowering.h:1251
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1581
llvm::TargetLoweringBase::isIndexedLoadLegal
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
Definition: TargetLowering.h:1290
llvm::BasicTTIImplBase::isLSRCostLess
bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2)
Definition: BasicTTIImpl.h:322
llvm::TargetTransformInfo::PeelingPreferences
Definition: TargetTransformInfo.h:531
llvm::TargetLoweringBase::isProfitableToHoist
virtual bool isProfitableToHoist(Instruction *I) const
Definition: TargetLowering.h:2518
llvm::BasicTTIImplBase::improveShuffleKindFromMask
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask) const
Definition: BasicTTIImpl.h:833
llvm::TargetLoweringBase::isSuitableForJumpTable
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
Definition: TargetLoweringBase.cpp:1619
llvm::BasicTTIImplBase::getInstructionLatency
InstructionCost getInstructionLatency(const Instruction *I)
Definition: BasicTTIImpl.h:609
llvm::Optional
Definition: APInt.h:33
llvm::BasicTTIImplBase::isIndexedStoreLegal
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:316
T
#define T
Definition: Mips16ISelLowering.cpp:341
llvm::BasicTTIImplBase::getMinPrefetchStride
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Definition: BasicTTIImpl.h:641
llvm::SmallPtrSet< const BasicBlock *, 4 >
llvm::TargetTransformInfo::OP_PowerOf2
@ OP_PowerOf2
Definition: TargetTransformInfo.h:879
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::TargetLoweringBase::getTypeLegalizationCost
std::pair< InstructionCost, MVT > getTypeLegalizationCost(const DataLayout &DL, Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
Definition: TargetLoweringBase.cpp:1838
Operator.h
llvm::VectorType::getElementType
Type * getElementType() const
Definition: DerivedTypes.h:421
llvm::ISD::EXTLOAD
@ EXTLOAD
Definition: ISDOpcodes.h:1335
llvm::StructType::create
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:479
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::ShuffleVectorInst::isReverseMask
static bool isReverseMask(ArrayRef< int > Mask)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
Definition: Instructions.cpp:2162
llvm::BasicTTIImplBase::getRegisterBitWidth
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
Definition: BasicTTIImpl.h:662
llvm::TargetLoweringBase::LegalizeAction
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
Definition: TargetLowering.h:195
llvm::FastMathFlags
Convenience struct for specifying and reasoning about fast-math flags.
Definition: Operator.h:161
llvm::TargetLoweringBase::isCheapToSpeculateCtlz
virtual bool isCheapToSpeculateCtlz() const
Return true if it is cheap to speculate a call to intrinsic ctlz.
Definition: TargetLowering.h:604
llvm::FixedVectorType
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:524
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::TargetTransformInfo::SK_PermuteSingleSrc
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
Definition: TargetTransformInfo.h:863
llvm::Type::getInt8Ty
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:201
llvm::LinearPolySize::isScalable
bool isScalable() const
Returns whether the size is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:299
llvm::BasicTTIImplBase::isNumRegsMajorCostOfLSR
bool isNumRegsMajorCostOfLSR()
Definition: BasicTTIImpl.h:326
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2178
llvm::TargetTransformInfo::CacheLevel
CacheLevel
The possible cache levels.
Definition: TargetTransformInfo.h:941
llvm::ISD::FABS
@ FABS
Definition: ISDOpcodes.h:867
llvm::TargetTransformInfoImplCRTPBase::getGEPCost
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency)
Definition: TargetTransformInfoImpl.h:861
llvm::BasicTTIImplBase::isSourceOfDivergence
bool isSourceOfDivergence(const Value *V)
Definition: BasicTTIImpl.h:264
llvm::IntrinsicCostAttributes::getScalarizationCost
InstructionCost getScalarizationCost() const
Definition: TargetTransformInfo.h:152
llvm::TargetTransformInfo::SK_Broadcast
@ SK_Broadcast
Broadcast element 0 to all other elements.
Definition: TargetTransformInfo.h:853
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::LinearPolySize< TypeSize >::isKnownLT
static bool isKnownLT(const LinearPolySize &LHS, const LinearPolySize &RHS)
Definition: TypeSize.h:329
TargetTransformInfoImpl.h
llvm::BasicTTIImplBase::getTreeReductionCost
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
Definition: BasicTTIImpl.h:2026
llvm::Triple::isArch64Bit
bool isArch64Bit() const
Test whether the architecture is 64-bit.
Definition: Triple.cpp:1336
llvm::IntrinsicCostAttributes::skipScalarizationCost
bool skipScalarizationCost() const
Definition: TargetTransformInfo.h:160
llvm::BasicTTIImplBase::rewriteIntrinsicWithAddressSpace
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Definition: BasicTTIImpl.h:286
llvm::TargetTransformInfo::requiresOrderedReduction
static bool requiresOrderedReduction(Optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
Definition: TargetTransformInfo.h:1153
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
llvm::IntrinsicCostAttributes::getFlags
FastMathFlags getFlags() const
Definition: TargetTransformInfo.h:151
floor
We have fiadd patterns now but the followings have the same cost and complexity We need a way to specify the later is more profitable def def The FP stackifier should handle simple permutates to reduce number of shuffle e g floor
Definition: README-FPStack.txt:54
MachineValueType.h
llvm::ISD::BRIND
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:942
llvm::AArch64CC::LT
@ LT
Definition: AArch64BaseInfo.h:266
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::ElementCount::isScalar
bool isScalar() const
Counting predicates.
Definition: TypeSize.h:396
llvm::TargetTransformInfoImplBase::getDataLayout
const DataLayout & getDataLayout() const
Definition: TargetTransformInfoImpl.h:48
llvm::BasicTTIImplBase::getPrefetchDistance
virtual unsigned getPrefetchDistance() const
Definition: BasicTTIImpl.h:637
llvm::DataLayout::getIndexSizeInBits
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:414
llvm::ISD::FFLOOR
@ FFLOOR
Definition: ISDOpcodes.h:885
llvm::TargetLoweringBase::isLegalAddImmediate
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
Definition: TargetLowering.h:2397
Instruction.h
llvm::TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR
bool isNumRegsMajorCostOfLSR() const
Definition: TargetTransformInfoImpl.h:215
CommandLine.h
llvm::FixedVectorType::getNumElements
unsigned getNumElements() const
Definition: DerivedTypes.h:567
TargetLowering.h
llvm::IntrinsicCostAttributes::getArgTypes
const SmallVectorImpl< Type * > & getArgTypes() const
Definition: TargetTransformInfo.h:154
llvm::MCSubtargetInfo::getMaxPrefetchIterationsAhead
virtual unsigned getMaxPrefetchIterationsAhead() const
Return the maximum prefetch distance in terms of loop iterations.
Definition: MCSubtargetInfo.cpp:355
llvm::Intrinsic::getType
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Definition: Function.cpp:1292
llvm::TargetTransformInfo::SK_PermuteTwoSrc
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
Definition: TargetTransformInfo.h:861
llvm::TargetTransformInfo::PeelingPreferences::PeelProfiledIterations
bool PeelProfiledIterations
Allow peeling basing on profile.
Definition: TargetTransformInfo.h:544
llvm::BlockFrequencyInfo
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Definition: BlockFrequencyInfo.h:37
llvm::TargetLoweringBase::isOperationLegalOrCustom
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
Definition: TargetLowering.h:1113
llvm::MCSubtargetInfo::getPrefetchDistance
virtual unsigned getPrefetchDistance() const
Return the preferred prefetch distance in terms of instructions.
Definition: MCSubtargetInfo.cpp:351
llvm::BasicTTIImplBase::getUnrollingPreferences
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
Definition: BasicTTIImpl.h:487
TargetMachine.h
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
llvm::ISD::CTLZ
@ CTLZ
Definition: ISDOpcodes.h:668
llvm::TargetTransformInfoImplBase
Base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
Definition: TargetTransformInfoImpl.h:34
llvm::BasicTTIImplBase::adjustInliningThreshold
unsigned adjustInliningThreshold(const CallBase *CB)
Definition: BasicTTIImpl.h:483
llvm::ISD::SELECT
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:679
llvm::TargetTransformInfoImplBase::getCacheAssociativity
llvm::Optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
Definition: TargetTransformInfoImpl.h:427
Constants.h
llvm::BasicTTIImplBase::getVectorInstrCost
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index)
Definition: BasicTTIImpl.h:1108
llvm::TargetTransformInfo::OP_None
@ OP_None
Definition: TargetTransformInfo.h:879
llvm::TargetTransformInfo::ShuffleKind
ShuffleKind
The various kinds of shuffle patterns for vector queries.
Definition: TargetTransformInfo.h:852
llvm::BasicTTIImplBase::useGPUDivergenceAnalysis
bool useGPUDivergenceAnalysis()
Definition: BasicTTIImpl.h:262
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::Triple::isOSDarwin
bool isOSDarwin() const
isOSDarwin - Is this a "Darwin" OS (macOS, iOS, tvOS or watchOS).
Definition: Triple.h:484
llvm::BasicTTIImplBase::enableWritePrefetching
virtual bool enableWritePrefetching() const
Definition: BasicTTIImpl.h:653
llvm::TargetTransformInfo::CastContextHint
CastContextHint
Represents a hint about the context in which a cast is used.
Definition: TargetTransformInfo.h:1055
llvm::BasicTTIImplBase::getAddressComputationCost
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *, const SCEV *)
Definition: BasicTTIImpl.h:2000
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
Intrinsics.h
llvm::TargetLoweringBase::AddrMode::HasBaseReg
bool HasBaseReg
Definition: TargetLowering.h:2352
llvm::BitVector::count
size_type count() const
count - Returns the number of bits which are set.
Definition: BitVector.h:154
round
static uint64_t round(uint64_t Acc, uint64_t Input)
Definition: xxhash.cpp:57
llvm::BasicTTIImplBase::getFlatAddressSpace
unsigned getFlatAddressSpace()
Definition: BasicTTIImpl.h:268
InstrTypes.h
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
llvm::BasicTTIImplBase::getArithmeticReductionCost
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, Optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
Definition: BasicTTIImpl.h:2112
llvm::ISD::FROUND
@ FROUND
Definition: ISDOpcodes.h:883
llvm::LoopBase::blocks
iterator_range< block_iterator > blocks() const
Definition: LoopInfo.h:178
llvm::BasicTTIImplBase::getExtractWithExtendCost
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
Definition: BasicTTIImpl.h:1046
llvm::Type::isVectorTy
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:237
llvm::BasicTTIImplBase::getCacheSize
virtual Optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:617
llvm::ISD::UDIVREM
@ UDIVREM
Definition: ISDOpcodes.h:256
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::TargetLoweringBase::isCheapToSpeculateCttz
virtual bool isCheapToSpeculateCttz() const
Return true if it is cheap to speculate a call to intrinsic cttz.
Definition: TargetLowering.h:599
llvm::BasicTTIImplBase::getMaxInterleaveFactor
unsigned getMaxInterleaveFactor(unsigned VF)
Definition: BasicTTIImpl.h:748
llvm::Instruction
Definition: Instruction.h:45
llvm::TargetTransformInfo::MIM_PreDec
@ MIM_PreDec
Pre-decrementing.
Definition: TargetTransformInfo.h:1282
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition: Type.cpp:153
llvm::TargetLoweringBase::isLegalAddressingMode
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
Definition: TargetLoweringBase.cpp:1921
llvm::TargetMachine::getAssumedAddrSpace
virtual unsigned getAssumedAddrSpace(const Value *V) const
If the specified generic pointer could be assumed as a pointer to a specific address space,...
Definition: TargetMachine.h:312
llvm::BasicTTIImplBase::getInlinerVectorBonusPercent
int getInlinerVectorBonusPercent()
Definition: BasicTTIImpl.h:485
llvm::ISD::FNEARBYINT
@ FNEARBYINT
Definition: ISDOpcodes.h:882
llvm::ISD::FRINT
@ FRINT
Definition: ISDOpcodes.h:881
llvm::TargetTransformInfoImplBase::getCFInstrCost
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const
Definition: TargetTransformInfoImpl.h:522
llvm::BasicTTIImplBase::getCacheLineSize
virtual unsigned getCacheLineSize() const
Definition: BasicTTIImpl.h:633
BitVector.h
llvm::TargetTransformInfoImplCRTPBase
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
Definition: TargetTransformInfoImpl.h:850
SmallPtrSet.h
llvm::BitVector
Definition: BitVector.h:74
llvm::ISD::UNINDEXED
@ UNINDEXED
Definition: ISDOpcodes.h:1304
llvm::FixedVectorType::get
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:650
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Triple::getArch
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:307
llvm::AddressSpace
AddressSpace
Definition: NVPTXBaseInfo.h:21
llvm::BasicTTIImplBase::getCmpSelInstrCost
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
Definition: BasicTTIImpl.h:1060
llvm::PartialUnrollingThreshold
cl::opt< unsigned > PartialUnrollingThreshold
llvm::BasicTTIImplBase::getEstimatedNumberOfCaseClusters
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Definition: BasicTTIImpl.h:371
llvm::BasicTTIImplBase::getCastInstrCost
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
Definition: BasicTTIImpl.h:890
llvm::None
const NoneType None
Definition: None.h:23
llvm::LinearPolySize< TypeSize >::getFixed
static TypeSize getFixed(ScalarTy MinVal)
Definition: TypeSize.h:284
llvm::lltok::Kind
Kind
Definition: LLToken.h:18
Type.h
llvm::IntrinsicCostAttributes
Definition: TargetTransformInfo.h:118
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::BasicTTIImplBase::shouldBuildLookupTables
bool shouldBuildLookupTables()
Definition: BasicTTIImpl.h:430
llvm::maxnum
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE maxNum semantics.
Definition: APFloat.h:1309
LoopInfo.h
llvm::TargetTransformInfoImplBase::emitGetActiveLaneMask
bool emitGetActiveLaneMask() const
Definition: TargetTransformInfoImpl.h:165
llvm::TargetTransformInfoImplBase::isProfitableLSRChainElement
bool isProfitableLSRChainElement(Instruction *I) const
Definition: TargetTransformInfoImpl.h:217
llvm::ProfileSummaryInfo
Analysis providing profile information.
Definition: ProfileSummaryInfo.h:39
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:78
llvm::ISD::POST_INC
@ POST_INC
Definition: ISDOpcodes.h:1304
getCalledFunction
static const Function * getCalledFunction(const Value *V, bool LookThroughBitCast, bool &IsNoBuiltin)
Definition: MemoryBuiltins.cpp:118
llvm::TargetTransformInfoImplBase::simplifyDemandedVectorEltsIntrinsic
Optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Definition: TargetTransformInfoImpl.h:181
llvm::ISD::FPOW
@ FPOW
Definition: ISDOpcodes.h:873
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:201
llvm::ISD::FADD
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:377
llvm::TargetTransformInfo::SK_Reverse
@ SK_Reverse
Reverse the order of the vector.
Definition: TargetTransformInfo.h:854
llvm::VectorType
Base class of all SIMD vector types.
Definition: DerivedTypes.h:388
llvm::TargetTransformInfo::CastContextHint::Normal
@ Normal
The cast is used with a normal load/store.
llvm::ISD::FMINIMUM
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:911
BasicBlock.h
llvm::APInt::slt
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1224
llvm::SCEV
This class represents an analyzed expression in the program.
Definition: ScalarEvolution.h:78
llvm::BasicTTIImplBase::isLegalAddressingMode
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr)
Definition: BasicTTIImpl.h:299
llvm::GlobalValue
Definition: GlobalValue.h:44
llvm::divideCeil
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:742
llvm::TargetTransformInfo::SK_InsertSubvector
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
Definition: TargetTransformInfo.h:859
llvm::TargetMachine::isNoopAddrSpaceCast
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
Definition: TargetMachine.h:302
llvm::BasicTTIImplBase::simplifyDemandedUseBitsIntrinsic
Optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
Definition: BasicTTIImpl.h:590
llvm::ISD::FLOG10
@ FLOG10
Definition: ISDOpcodes.h:876
llvm::BasicTTIImplBase::haveFastSqrt
bool haveFastSqrt(Type *Ty)
Definition: BasicTTIImpl.h:461
llvm::TargetTransformInfo::MIM_PostInc
@ MIM_PostInc
Post-incrementing.
Definition: TargetTransformInfo.h:1283
llvm::BasicTTIImplBase::simplifyDemandedVectorEltsIntrinsic
Optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Definition: BasicTTIImpl.h:599
Index
uint32_t Index
Definition: ELFObjHandler.cpp:84
uint64_t
llvm::Type::getWithNewBitWidth
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
Definition: DerivedTypes.h:726
llvm::TargetTransformInfo::LSRCost
Definition: TargetTransformInfo.h:410
llvm::TargetLoweringBase::isLoadExtLegal
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
Definition: TargetLowering.h:1237
llvm::TargetLoweringBase::isTypeLegal
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Definition: TargetLowering.h:894
llvm::TargetLoweringBase::getLoadExtAction
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
Definition: TargetLowering.h:1225
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::TargetTransformInfo::OK_UniformConstantValue
@ OK_UniformConstantValue
Definition: TargetTransformInfo.h:874
llvm::TargetLoweringBase::isSuitableForBitTests
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
Definition: TargetLowering.h:1187
llvm::BasicTTIImplBase::isIndexedLoadLegal
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:310
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::TargetLoweringBase::isOperationLegalOrPromote
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
Definition: TargetLowering.h:1127
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::TargetTransformInfoImplBase::simplifyDemandedUseBitsIntrinsic
Optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Definition: TargetTransformInfoImpl.h:175
llvm::TargetTransformInfo::UnrollingPreferences
Parameters that control the generic loop unrolling transformation.
Definition: TargetTransformInfo.h:424
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:558
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::TargetTransformInfoImplBase::getCastInstrCost
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const
Definition: TargetTransformInfoImpl.h:478
llvm::TargetTransformInfo::OperandValueProperties
OperandValueProperties
Additional properties of an operand's values.
Definition: TargetTransformInfo.h:879
llvm::LoopAccessInfo
Drive the analysis of memory accesses in the loop.
Definition: LoopAccessAnalysis.h:525
llvm::ISD::FCOPYSIGN
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:476
llvm::BasicTTIImplBase
Base class which can be used to help build a TTI implementation.
Definition: BasicTTIImpl.h:77
ArrayRef.h
llvm::TargetTransformInfo::PeelingPreferences::AllowLoopNestsPeeling
bool AllowLoopNestsPeeling
Allow peeling off loop iterations for loop nests.
Definition: TargetTransformInfo.h:539
llvm::ISD::ZEXTLOAD
@ ZEXTLOAD
Definition: ISDOpcodes.h:1335
maximum
Definition: README.txt:489
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::EVT::getIntegerVT
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
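For illustration, assuming Ctx is an LLVMContext in scope:
  // EVTs, unlike MVTs, can represent arbitrary (even non-power-of-two) widths.
  EVT I48 = EVT::getIntegerVT(Ctx, 48);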
llvm::TargetLoweringBase::allowsMisalignedMemoryAccesses
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, bool *=nullptr) const
Determine if the target supports unaligned memory accesses.
Definition: TargetLowering.h:1640
llvm::TargetMachine
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:42
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::OptimizationRemarkEmitter::emit
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Definition: OptimizationRemarkEmitter.cpp:77
llvm::BasicTTIImplBase::getMaxVScale
Optional< unsigned > getMaxVScale() const
Definition: BasicTTIImpl.h:666
llvm::TargetTransformInfoImplBase::isLoweredToCall
bool isLoweredToCall(const Function *F) const
Definition: TargetTransformInfoImpl.h:116
llvm::CmpInst::BAD_ICMP_PREDICATE
@ BAD_ICMP_PREDICATE
Definition: InstrTypes.h:753
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:83
llvm::Type::isVoidTy
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:138
llvm::BasicTTIImplBase::getInterleavedMemoryOpCost
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
Definition: BasicTTIImpl.h:1176
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::TargetTransformInfo::SK_Splice
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
Definition: TargetTransformInfo.h:865
llvm::TargetTransformInfo::getCastInstrCost
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
Definition: TargetTransformInfo.cpp:775
llvm::BasicTTIImpl::BasicTTIImpl
BasicTTIImpl(const TargetMachine *TM, const Function &F)
Definition: BasicTargetTransformInfo.cpp:32
llvm::ISD::POST_DEC
@ POST_DEC
Definition: ISDOpcodes.h:1304
llvm::TargetTransformInfo::OperandValueKind
OperandValueKind
Additional information about an operand's possible values.
Definition: TargetTransformInfo.h:871
llvm::ElementCount::isVector
bool isVector() const
One or more elements.
Definition: TypeSize.h:398
llvm::TargetSubtargetInfo::useAA
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine,...
Definition: TargetSubtargetInfo.cpp:60
llvm::MCSubtargetInfo::getMinPrefetchStride
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Return the minimum stride necessary to trigger software prefetching.
Definition: MCSubtargetInfo.cpp:363
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:70
llvm::BasicTTIImplBase::getMaxPrefetchIterationsAhead
virtual unsigned getMaxPrefetchIterationsAhead() const
Definition: BasicTTIImpl.h:649
llvm::APIntOps::smin
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2168
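A small illustrative example of the signed-minimum helper:
  APInt A(32, -5, /*isSigned=*/true);
  APInt B(32, 3);
  const APInt &Min = APIntOps::smin(A, B); // -5 under signed comparison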
llvm::TargetTransformInfo::PeelingPreferences::PeelCount
unsigned PeelCount
A forced peeling factor (the number of bodies of the original loop that should be peeled off before the loop body).
Definition: TargetTransformInfo.h:535
llvm::ISD::FMAXIMUM
@ FMAXIMUM
Definition: ISDOpcodes.h:912
llvm::BasicTTIImplBase::emitGetActiveLaneMask
bool emitGetActiveLaneMask()
Definition: BasicTTIImpl.h:581
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::LoopInfo
Definition: LoopInfo.h:1083
llvm::ISD::PRE_INC
@ PRE_INC
Definition: ISDOpcodes.h:1304
llvm::OptimizationRemarkEmitter
The optimization diagnostic interface.
Definition: OptimizationRemarkEmitter.h:33
llvm::APInt::getAllOnesValue
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
Definition: APInt.h:567
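In cost modeling this is the usual way to express "all lanes are demanded"; a sketch assuming NumElts holds a fixed vector's element count:
  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
  // e.g. pass DemandedElts to the DemandedElts overload of getScalarizationOverhead above.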
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1554
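An illustrative use of the range wrapper, here testing a shuffle mask for undef (negative) lanes:
  SmallVector<int, 8> Mask = {0, 1, -1, 3};
  bool HasUndefLane = llvm::any_of(Mask, [](int M) { return M < 0; });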
DataLayout.h
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:41
llvm::BasicTTIImplBase::getScalarizationOverhead
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract)
Estimate the overhead of scalarizing an instruction.
Definition: BasicTTIImpl.h:671
llvm::TargetTransformInfo::TCK_SizeAndLatency
@ TCK_SizeAndLatency
The weighted sum of size and latency.
Definition: TargetTransformInfo.h:215
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::ISD::SREM
@ SREM
Definition: ISDOpcodes.h:244
llvm::BasicTTIImplBase::getRegUsageForType
InstructionCost getRegUsageForType(Type *Ty)
Definition: BasicTTIImpl.h:360
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::TargetTransformInfoImplCRTPBase::getInstructionLatency
InstructionCost getInstructionLatency(const Instruction *I)
Definition: TargetTransformInfoImpl.h:1148
llvm::TargetLoweringBase::AddrMode::BaseGV
GlobalValue * BaseGV
Definition: TargetLowering.h:2350
CostKind
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
TargetSubtargetInfo.h
trunc
Definition: README-FPStack.txt:63
llvm::ISD::FEXP
@ FEXP
Definition: ISDOpcodes.h:877
llvm::PICLevel::Level
Level
Definition: CodeGen.h:33
llvm::TargetLoweringBase::InstructionOpcodeToISD
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
Definition: TargetLoweringBase.cpp:1758
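A sketch of the cost-model pattern this mapping enables, assuming TLI and a legalized value type VT are in scope:
  int ISDOpc = TLI->InstructionOpcodeToISD(Instruction::Mul);
  assert(ISDOpc && "expected a mapped ISD opcode");
  if (TLI->isOperationLegalOrPromote(ISDOpc, VT)) {
    // Treat the operation as roughly one legal machine instruction.
  }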
llvm::Type::isPtrOrPtrVectorTy
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:234
llvm::TargetTransformInfoImplBase::getCmpSelInstrCost
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I) const
Definition: TargetTransformInfoImpl.h:531
llvm::ISD::FEXP2
@ FEXP2
Definition: ISDOpcodes.h:878
llvm::BasicTTIImplBase::getPeelingPreferences
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
Definition: BasicTTIImpl.h:559
llvm::TargetTransformInfo::MIM_Unindexed
@ MIM_Unindexed
No indexing.
Definition: TargetTransformInfo.h:1280
llvm::VectorType::getHalfElementsVectorType
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
Definition: DerivedTypes.h:492
llvm::AMDGPUISD::BFI
@ BFI
Definition: AMDGPUISelLowering.h:421
llvm::TargetTransformInfo::OK_AnyValue
@ OK_AnyValue
Definition: TargetTransformInfo.h:872
llvm::BasicTTIImplBase::getScalarizationOverhead
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract)
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
Definition: BasicTTIImpl.h:696
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:273
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:127
llvm::TargetTransformInfo::SK_Transpose
@ SK_Transpose
Transpose two vectors.
Definition: TargetTransformInfo.h:858
llvm::TargetSubtargetInfo
TargetSubtargetInfo - Generic base class for all target subtargets.
Definition: TargetSubtargetInfo.h:59
llvm::ISD::SEXTLOAD
@ SEXTLOAD
Definition: ISDOpcodes.h:1335
llvm::BasicTTIImplBase::useAA
bool useAA() const
Definition: BasicTTIImpl.h:353
llvm::TargetTransformInfo::CastContextHint::None
@ None
The cast is not used with a load/store of any kind.
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:92
llvm::TargetTransformInfo::MIM_PreInc
@ MIM_PreInc
Pre-incrementing.
Definition: TargetTransformInfo.h:1281
llvm::BasicTTIImplBase::getCallInstrCost
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency)
Compute a cost of the given call instruction.
Definition: BasicTTIImpl.h:1989
llvm::TargetLoweringBase::isFreeAddrSpaceCast
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
Definition: TargetLoweringBase.cpp:944
llvm::TargetTransformInfoImplBase::getArithmeticInstrCost
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo, TTI::OperandValueProperties Opd2PropInfo, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const
Definition: TargetTransformInfoImpl.h:449
llvm::ISD::FSQRT
@ FSQRT
Definition: ISDOpcodes.h:868
llvm::MCID::Select
@ Select
Definition: MCInstrDesc.h:162
llvm::TargetLoweringBase::getTargetMachine
const TargetMachine & getTargetMachine() const
Definition: TargetLowering.h:338
llvm::BasicTTIImplBase::isHardwareLoopProfitable
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
Definition: BasicTTIImpl.h:567
llvm::APIntOps::umax
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2183
Constant.h
llvm::TargetLoweringBase::AddrMode::BaseOffs
int64_t BaseOffs
Definition: TargetLowering.h:2351
llvm::minnum
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE minNum semantics.
Definition: APFloat.h:1298
llvm::TargetLoweringBase::isFAbsFree
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
Definition: TargetLowering.h:2708
llvm::ISD::STRICT_FMA
@ STRICT_FMA
Definition: ISDOpcodes.h:392
llvm::ISD::FMAXNUM
@ FMAXNUM
Definition: ISDOpcodes.h:899
llvm::BasicTTIImpl
Concrete BasicTTIImpl that can be used if no further customization is needed.
Definition: BasicTTIImpl.h:2208
llvm::KnownBits
Definition: KnownBits.h:23
llvm::TargetLoweringBase::AddrMode::Scale
int64_t Scale
Definition: TargetLowering.h:2353
llvm::TargetLoweringBase::isIndexedStoreLegal
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
Definition: TargetLowering.h:1304
llvm::TargetTransformInfo::UnrollingPreferences::UpperBound
bool UpperBound
Allow using trip count upper bound to unroll loops.
Definition: TargetTransformInfo.h:491
llvm::ISD::FCOS
@ FCOS
Definition: ISDOpcodes.h:871
llvm::ISD::FCEIL
@ FCEIL
Definition: ISDOpcodes.h:879
llvm::ShuffleVectorInst::isSelectMask
static bool isSelectMask(ArrayRef< int > Mask)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
Definition: Instructions.cpp:2186
llvm::Type::isIntOrIntVectorTy
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:207
llvm::ISD::FSIN
@ FSIN
Definition: ISDOpcodes.h:870
ISDOpcodes.h
llvm::TypeSize
Definition: TypeSize.h:417
llvm::MCSchedModel::DefaultLoadLatency
static const unsigned DefaultLoadLatency
Definition: MCSchedule.h:287
Casting.h
llvm::BasicTTIImplBase::getMaskedMemoryOpCost
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
Definition: BasicTTIImpl.h:1160
llvm::TargetLoweringBase::Custom
@ Custom
Definition: TargetLowering.h:200
llvm::TargetLoweringBase::isOperationExpand
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
Definition: TargetLowering.h:1212
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::LoopBase::getHeader
BlockT * getHeader() const
Definition: LoopInfo.h:104
llvm::BasicTTIImplBase::isProfitableToHoist
bool isProfitableToHoist(Instruction *I)
Definition: BasicTTIImpl.h:349
llvm::BasicTTIImplBase::isLegalAddImmediate
bool isLegalAddImmediate(int64_t imm)
Definition: BasicTTIImpl.h:291
llvm::BasicTTIImplBase::getScalarizationOverhead
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys)
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
Definition: BasicTTIImpl.h:734
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:219
llvm::Function::isTargetIntrinsic
bool isTargetIntrinsic() const
isTargetIntrinsic - Returns true if this function is an intrinsic and the intrinsic is specific to a ...
Definition: Function.cpp:702
llvm::ISD::SDIV
@ SDIV
Definition: ISDOpcodes.h:242
llvm::log2
static double log2(double V)
Definition: AMDGPULibCalls.cpp:842
llvm::TargetLoweringBase::getScalingFactorCost
virtual InstructionCost getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
Definition: TargetLowering.h:2378
llvm::BasicTTIImplBase::collectFlatAddressOperands
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Definition: BasicTTIImpl.h:273
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:183
llvm::InstCombiner
The core instruction combiner logic.
Definition: InstCombiner.h:45
llvm::TargetTransformInfoImplBase::instCombineIntrinsic
Optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Definition: TargetTransformInfoImpl.h:169
llvm::TargetLoweringBase::isOperationLegalOrCustomOrPromote
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
Definition: TargetLowering.h:1141
llvm::TargetTransformInfoImplBase::DL
const DataLayout & DL
Definition: TargetTransformInfoImpl.h:38
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::HardwareLoopInfo
Attributes of a target dependent hardware loop.
Definition: TargetTransformInfo.h:95
llvm::InstructionCost::getInvalid
static InstructionCost getInvalid(CostType Val=0)
Definition: InstructionCost.h:73
llvm::BasicTTIImplBase::getMinMaxReductionCost
InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy, bool IsUnsigned, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
Definition: BasicTTIImpl.h:2122
llvm::CodeModel::Large
@ Large
Definition: CodeGen.h:28
llvm::TargetLoweringBase::isTruncateFree
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
Definition: TargetLowering.h:2501
llvm::OptimizationRemark
Diagnostic information for applied optimization remarks.
Definition: DiagnosticInfo.h:684
llvm::BasicTTIImplBase::getGatherScatterOpCost
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
Definition: BasicTTIImpl.h:1167
Instructions.h
llvm::IntrinsicCostAttributes::getID
Intrinsic::ID getID() const
Definition: TargetTransformInfo.h:148
llvm::TargetLoweringBase::areJTsAllowed
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
Definition: TargetLowering.h:1159
llvm::BasicTTIImplBase::getGEPCost
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands)
Definition: BasicTTIImpl.h:366
SmallVector.h
llvm::TargetTransformInfo::RegisterKind
RegisterKind
Definition: TargetTransformInfo.h:900
llvm::ISD::UREM
@ UREM
Definition: ISDOpcodes.h:245
llvm::TargetLoweringBase::Expand
@ Expand
Definition: TargetLowering.h:198
llvm::BasicTTIImplBase::getShuffleCost
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, int Index, VectorType *SubTp)
Definition: BasicTTIImpl.h:866
N
#define N
llvm::BasicTTIImplBase::getIntrinsicInstrCost
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
Definition: BasicTTIImpl.h:1325
llvm::ISD::BITREVERSE
@ BITREVERSE
Definition: ISDOpcodes.h:670
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::ISD::CTTZ
@ CTTZ
Definition: ISDOpcodes.h:667
llvm::BasicTTIImplBase::getNumberOfParts
unsigned getNumberOfParts(Type *Tp)
Definition: BasicTTIImpl.h:1994
TargetTransformInfo.h
llvm::BasicTTIImplBase::getMemoryOpCost
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
Definition: BasicTTIImpl.h:1116
llvm::TargetLoweringBase::AddrMode
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg. If BaseGV is null...
Definition: TargetLowering.h:2349
llvm::TargetLoweringBase::isZExtFree
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
Definition: TargetLowering.h:2587
llvm::TargetTransformInfo::MIM_PostDec
@ MIM_PostDec
Post-decrementing.
Definition: TargetTransformInfo.h:1284
llvm::SmallVectorImpl< int >
llvm::BasicTTIImplBase::getAssumedAddrSpace
unsigned getAssumedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:282
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...
Definition: InstrTypes.h:1161
llvm::APInt::sgt
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1294
DerivedTypes.h
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::IntegerType::get
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:275
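For illustration, assuming Ctx is an LLVMContext:
  IntegerType *I64Ty = IntegerType::get(Ctx, 64); // the canonical i64 type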
llvm::TargetTransformInfo::UnrollingPreferences::OptSizeThreshold
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
Definition: TargetTransformInfo.h:446
llvm::BasicTTIImplBase::preferPredicateOverEpilogue
bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *TLI, DominatorTree *DT, const LoopAccessInfo *LAI)
Definition: BasicTTIImpl.h:574
llvm::BasicTTIImplBase::getTypeBasedIntrinsicInstrCost
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on argument types.
Definition: BasicTTIImpl.h:1506
BB
Definition: README.txt:39
llvm::BasicTTIImplBase::getScalingFactorCost
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
Definition: BasicTTIImpl.h:334
llvm::MCSubtargetInfo::getCacheLineSize
virtual Optional< unsigned > getCacheLineSize(unsigned Level) const
Return the target cache line size in bytes at a given level.
Definition: MCSubtargetInfo.cpp:347
llvm::ISD::SDIVREM
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
llvm::TargetTransformInfo::getOperandInfo
static OperandValueKind getOperandInfo(const Value *V, OperandValueProperties &OpProps)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
Definition: TargetTransformInfo.cpp:660
llvm::TargetTransformInfo::TCC_Basic
@ TCC_Basic
The cost of a typical 'add' instruction.
Definition: TargetTransformInfo.h:263
llvm::SwitchInst
Multiway switch.
Definition: Instructions.h:3206
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::TargetLoweringBase::getValueType
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
Definition: TargetLowering.h:1408
llvm::IntrinsicCostAttributes::isTypeBasedOnly
bool isTypeBasedOnly() const
Definition: TargetTransformInfo.h:156
llvm::MCSubtargetInfo::getCacheAssociativity
virtual Optional< unsigned > getCacheAssociativity(unsigned Level) const
Return the cache associativity for the given level of cache.
Definition: MCSubtargetInfo.cpp:343
llvm::TargetLoweringBase::TypeSplitVector
@ TypeSplitVector
Definition: TargetLowering.h:212
llvm::TargetTransformInfoImplBase::getIntrinsicInstrCost
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
Definition: TargetTransformInfoImpl.h:571
llvm::ISD::FTRUNC
@ FTRUNC
Definition: ISDOpcodes.h:880
llvm::TargetTransformInfo::OK_NonUniformConstantValue
@ OK_NonUniformConstantValue
Definition: TargetTransformInfo.h:875
llvm::BasicTTIImplBase::getOrderedReductionCost
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
Definition: BasicTTIImpl.h:2095
Value.h
llvm::abs
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1284
llvm::ISD::FCANONICALIZE
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition: ISDOpcodes.h:483
llvm::MachineMemOperand::MONone
@ MONone
Definition: MachineMemOperand.h:133
llvm::BasicTTIImplBase::hasBranchDivergence
bool hasBranchDivergence()
Definition: BasicTTIImpl.h:260
llvm::ISD::FLOG
@ FLOG
Definition: ISDOpcodes.h:874
llvm::BasicTTIImplBase::getExtendedAddReductionCost
InstructionCost getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind)
Definition: BasicTTIImpl.h:2180
llvm::Type::getContainedType
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
Definition: Type.h:348
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::TargetTransformInfo::TCK_RecipThroughput
@ TCK_RecipThroughput
Reciprocal throughput.
Definition: TargetTransformInfo.h:212
llvm::BasicTTIImplBase::isProfitableLSRChainElement
bool isProfitableLSRChainElement(Instruction *I)
Definition: BasicTTIImpl.h:330
llvm::IntrinsicCostAttributes::getArgs
const SmallVectorImpl< const Value * > & getArgs() const
Definition: TargetTransformInfo.h:153
llvm::VectorType::get
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Definition: Type.cpp:634
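An illustrative construction of both fixed and scalable vector types, assuming Ctx is an LLVMContext:
  auto *V4F32 = VectorType::get(Type::getFloatTy(Ctx), ElementCount::getFixed(4));      // <4 x float>
  auto *NXV2I32 = VectorType::get(Type::getInt32Ty(Ctx), ElementCount::getScalable(2)); // <vscale x 2 x i32>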
llvm::ShuffleVectorInst::isTransposeMask
static bool isTransposeMask(ArrayRef< int > Mask)
Return true if this shuffle mask is a transpose mask.
Definition: Instructions.cpp:2199
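A small check with a known transpose mask (the even lanes of two 4-element inputs), purely illustrative:
  int Mask[] = {0, 4, 2, 6};
  bool IsTranspose = ShuffleVectorInst::isTransposeMask(Mask); // true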
llvm::TargetTransformInfo::SK_ExtractSubvector
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
Definition: TargetTransformInfo.h:860
llvm::Triple::aarch64
@ aarch64
Definition: Triple.h:52
llvm::APIntOps::smax
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2173
llvm::ISD::CTPOP
@ CTPOP
Definition: ISDOpcodes.h:669
llvm::codeview::PublicSymFlags::Function
@ Function
llvm::TargetLoweringBase::getTypeAction
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
Definition: TargetLowering.h:928
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:128
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::ISD::VSELECT
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:688