1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InstrTypes.h"
39#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Operator.h"
43#include "llvm/IR/Type.h"
44#include "llvm/IR/Value.h"
52#include <algorithm>
53#include <cassert>
54#include <cstdint>
55#include <limits>
56#include <optional>
57#include <utility>
58
59namespace llvm {
60
61class Function;
62class GlobalValue;
63class LLVMContext;
64class ScalarEvolution;
65class SCEV;
66class TargetMachine;
67
68extern cl::opt<unsigned> PartialUnrollingThreshold;
69
70/// Base class which can be used to help build a TTI implementation.
71///
72/// This class provides as much implementation of the TTI interface as is
73/// possible using the target independent parts of the code generator.
74///
75/// In order to subclass it, your class must implement a getST() method to
76/// return the subtarget, and a getTLI() method to return the target lowering.
77/// We need these methods implemented in the derived class so that this class
78/// doesn't have to duplicate storage for them.
79template <typename T>
80class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
81private:
82  using BaseT = TargetTransformInfoImplCRTPBase<T>;
83  using TTI = TargetTransformInfo;
84
85 /// Helper function to access this as a T.
86 T *thisT() { return static_cast<T *>(this); }
87
88 /// Estimate a cost of Broadcast as an extract and sequence of insert
89 /// operations.
90 InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy,
91                                             TTI::TargetCostKind CostKind) {
92 InstructionCost Cost = 0;
93 // Broadcast cost is equal to the cost of extracting the zeroth element
94 // plus the cost of inserting it into every element of the result vector.
95 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
96 CostKind, 0, nullptr, nullptr);
97
98 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
99 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
100 CostKind, i, nullptr, nullptr);
101 }
102 return Cost;
103 }
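  // As a rough illustration (assuming the target reports a unit cost for each
  // getVectorInstrCost query): broadcasting a <4 x float> value would be
  // estimated as 1 extract of element 0 plus 4 inserts, i.e. a cost of 5.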
104
105 /// Estimate a cost of shuffle as a sequence of extract and insert
106 /// operations.
107 InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy,
108                                           TTI::TargetCostKind CostKind) {
109 InstructionCost Cost = 0;
110 // Shuffle cost is equal to the cost of extracting each element from its
111 // argument plus the cost of inserting it into the result vector.
112
113 // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> extracts index 0 of
114 // the first vector, index 1 of the second vector, index 2 of the first
115 // vector and index 3 of the second vector, and inserts them at indices
116 // <0,1,2,3> of the result vector.
117 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
118 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
119 CostKind, i, nullptr, nullptr);
120 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
121 CostKind, i, nullptr, nullptr);
122 }
123 return Cost;
124 }
125
126 /// Estimate a cost of subvector extraction as a sequence of extract and
127 /// insert operations.
128 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
129                                             TTI::TargetCostKind CostKind,
130                                             int Index,
131 FixedVectorType *SubVTy) {
132 assert(VTy && SubVTy &&
133 "Can only extract subvectors from vectors");
134 int NumSubElts = SubVTy->getNumElements();
135 assert((!isa<FixedVectorType>(VTy) ||
136 (Index + NumSubElts) <=
137 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
138 "SK_ExtractSubvector index out of range");
139
140 InstructionCost Cost = 0;
141 // Subvector extraction cost is equal to the cost of extracting each element
142 // from the source vector plus the cost of inserting it into the result
143 // vector type.
144 for (int i = 0; i != NumSubElts; ++i) {
145 Cost +=
146 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
147 CostKind, i + Index, nullptr, nullptr);
148 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
149 CostKind, i, nullptr, nullptr);
150 }
151 return Cost;
152 }
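  // For example, assuming unit per-element costs, extracting a <2 x i32>
  // subvector at Index 2 from an <8 x i32> source is estimated as
  // 2 extracts (elements 2 and 3) plus 2 inserts, i.e. a cost of 4.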
153
154 /// Estimate a cost of subvector insertion as a sequence of extract and
155 /// insert operations.
156 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
157                                            TTI::TargetCostKind CostKind,
158                                            int Index,
159 FixedVectorType *SubVTy) {
160 assert(VTy && SubVTy &&
161 "Can only insert subvectors into vectors");
162 int NumSubElts = SubVTy->getNumElements();
163 assert((!isa<FixedVectorType>(VTy) ||
164 (Index + NumSubElts) <=
165 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
166 "SK_InsertSubvector index out of range");
167
168 InstructionCost Cost = 0;
169 // Subvector insertion cost is equal to the cost of extracting each element
170 // from the subvector plus the cost of inserting it into the result vector
171 // type.
172 for (int i = 0; i != NumSubElts; ++i) {
173 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
174 CostKind, i, nullptr, nullptr);
175 Cost +=
176 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
177 i + Index, nullptr, nullptr);
178 }
179 return Cost;
180 }
181
182 /// Local query method delegates up to T which *must* implement this!
183 const TargetSubtargetInfo *getST() const {
184 return static_cast<const T *>(this)->getST();
185 }
186
187 /// Local query method delegates up to T which *must* implement this!
188 const TargetLoweringBase *getTLI() const {
189 return static_cast<const T *>(this)->getTLI();
190 }
191
192 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
193 switch (M) {
195 return ISD::UNINDEXED;
196 case TTI::MIM_PreInc:
197 return ISD::PRE_INC;
198 case TTI::MIM_PreDec:
199 return ISD::PRE_DEC;
200 case TTI::MIM_PostInc:
201 return ISD::POST_INC;
202 case TTI::MIM_PostDec:
203 return ISD::POST_DEC;
204 }
205 llvm_unreachable("Unexpected MemIndexedMode");
206 }
207
208 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
209 Align Alignment,
210 bool VariableMask,
211 bool IsGatherScatter,
212 TTI::TargetCostKind CostKind) {
213 // We cannot scalarize scalable vectors, so return Invalid.
214 if (isa<ScalableVectorType>(DataTy))
215   return InstructionCost::getInvalid();
216
217 auto *VT = cast<FixedVectorType>(DataTy);
218 // Assume the target does not have support for gather/scatter operations
219 // and provide a rough estimate.
220 //
221 // First, compute the cost of the individual memory operations.
222 InstructionCost AddrExtractCost =
223 IsGatherScatter
224 ? getVectorInstrCost(Instruction::ExtractElement,
225   FixedVectorType::get(
226     PointerType::get(VT->getElementType(), 0),
227 VT->getNumElements()),
228 CostKind, -1, nullptr, nullptr)
229 : 0;
230 InstructionCost LoadCost =
231 VT->getNumElements() *
232 (AddrExtractCost +
233 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
234
235 // Next, compute the cost of packing the result in a vector.
236 InstructionCost PackingCost =
237 getScalarizationOverhead(VT, Opcode != Instruction::Store,
238 Opcode == Instruction::Store, CostKind);
239
240 InstructionCost ConditionalCost = 0;
241 if (VariableMask) {
242 // Compute the cost of conditionally executing the memory operations with
243 // variable masks. This includes extracting the individual conditions, plus
244 // branches and PHIs to combine the results.
245 // NOTE: Estimating the cost of conditionally executing the memory
246 // operations accurately is quite difficult and the current solution
247 // provides a very rough estimate only.
248 ConditionalCost =
249 VT->getNumElements() *
250   (getVectorInstrCost(
251      Instruction::ExtractElement,
252      FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
253        VT->getNumElements()),
254 CostKind, -1, nullptr, nullptr) +
255 getCFInstrCost(Instruction::Br, CostKind) +
256 getCFInstrCost(Instruction::PHI, CostKind));
257 }
258
259 return LoadCost + PackingCost + ConditionalCost;
260 }
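  // Sketch of the estimate above, assuming unit per-element costs and a
  // scalar memory-op cost of 1: a gather of <4 x i32> with a variable mask is
  // roughly 4 * (1 address extract + 1 load)   (LoadCost)
  //       + 4 inserts to build the result      (PackingCost)
  //       + 4 * (1 mask extract + br + phi)    (ConditionalCost).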
261
262protected:
263 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
264     : BaseT(DL) {}
265 virtual ~BasicTTIImplBase() = default;
266
268
269public:
270 /// \name Scalar TTI Implementations
271 /// @{
273 unsigned AddressSpace, Align Alignment,
274 unsigned *Fast) const {
276 return getTLI()->allowsMisalignedMemoryAccesses(
278 }
279
280 bool hasBranchDivergence(const Function *F = nullptr) { return false; }
281
282 bool isSourceOfDivergence(const Value *V) { return false; }
283
284 bool isAlwaysUniform(const Value *V) { return false; }
285
286 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
287 return false;
288 }
289
290 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const {
291 return true;
292 }
293
295 // Return an invalid address space.
296 return -1;
297 }
298
300 Intrinsic::ID IID) const {
301 return false;
302 }
303
304 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
305 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
306 }
307
308 unsigned getAssumedAddrSpace(const Value *V) const {
309 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
310 }
311
312 bool isSingleThreaded() const {
313 return getTLI()->getTargetMachine().Options.ThreadModel ==
315 }
316
317 std::pair<const Value *, unsigned>
319 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
320 }
321
323 Value *NewV) const {
324 return nullptr;
325 }
326
327 bool isLegalAddImmediate(int64_t imm) {
328 return getTLI()->isLegalAddImmediate(imm);
329 }
330
331 bool isLegalICmpImmediate(int64_t imm) {
332 return getTLI()->isLegalICmpImmediate(imm);
333 }
334
335 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
336 bool HasBaseReg, int64_t Scale,
337 unsigned AddrSpace, Instruction *I = nullptr) {
339 AM.BaseGV = BaseGV;
340 AM.BaseOffs = BaseOffset;
341 AM.HasBaseReg = HasBaseReg;
342 AM.Scale = Scale;
343 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
344 }
345
346 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
347 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
348 }
349
350 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
351 Type *ScalarValTy) const {
352 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
353 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
354 EVT VT = getTLI()->getValueType(DL, SrcTy);
355 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
356 getTLI()->isOperationCustom(ISD::STORE, VT))
357 return true;
358
359 EVT ValVT =
360 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
361 EVT LegalizedVT =
362 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
363 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
364 };
365 while (VF > 2 && IsSupportedByTarget(VF))
366 VF /= 2;
367 return VF;
368 }
369
371 const DataLayout &DL) const {
372 EVT VT = getTLI()->getValueType(DL, Ty);
373 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
374 }
375
377 const DataLayout &DL) const {
378 EVT VT = getTLI()->getValueType(DL, Ty);
379 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
380 }
381
384 }
385
388 }
389
393 }
394
397 }
398
400 int64_t BaseOffset, bool HasBaseReg,
401 int64_t Scale, unsigned AddrSpace) {
403 AM.BaseGV = BaseGV;
404 AM.BaseOffs = BaseOffset;
405 AM.HasBaseReg = HasBaseReg;
406 AM.Scale = Scale;
407 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
408 return 0;
409 return -1;
410 }
411
412 bool isTruncateFree(Type *Ty1, Type *Ty2) {
413 return getTLI()->isTruncateFree(Ty1, Ty2);
414 }
415
417 return getTLI()->isProfitableToHoist(I);
418 }
419
420 bool useAA() const { return getST()->useAA(); }
421
422 bool isTypeLegal(Type *Ty) {
423 EVT VT = getTLI()->getValueType(DL, Ty);
424 return getTLI()->isTypeLegal(VT);
425 }
426
427 unsigned getRegUsageForType(Type *Ty) {
428 EVT ETy = getTLI()->getValueType(DL, Ty);
429 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
430 }
431
435 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
436 }
437
439 unsigned &JumpTableSize,
441 BlockFrequencyInfo *BFI) {
442 /// Try to find the estimated number of clusters. Note that the number of
443 /// clusters identified in this function could be different from the actual
444 /// numbers found in lowering. This function ignores switches that are
445 /// lowered with a mix of jump table / bit test / BTree. It was
446 /// initially intended for estimating the cost of a switch in the
447 /// inline cost heuristic, but it's a generic cost model to be used in other
448 /// places (e.g., in loop unrolling).
449 unsigned N = SI.getNumCases();
450 const TargetLoweringBase *TLI = getTLI();
451 const DataLayout &DL = this->getDataLayout();
452
453 JumpTableSize = 0;
454 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
455
456 // Early exit if both a jump table and bit test are not allowed.
457 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
458 return N;
459
460 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
461 APInt MinCaseVal = MaxCaseVal;
462 for (auto CI : SI.cases()) {
463 const APInt &CaseVal = CI.getCaseValue()->getValue();
464 if (CaseVal.sgt(MaxCaseVal))
465 MaxCaseVal = CaseVal;
466 if (CaseVal.slt(MinCaseVal))
467 MinCaseVal = CaseVal;
468 }
469
470 // Check if suitable for a bit test
471 if (N <= DL.getIndexSizeInBits(0u)) {
473 for (auto I : SI.cases())
474 Dests.insert(I.getCaseSuccessor());
475
476 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
477 DL))
478 return 1;
479 }
480
481 // Check if suitable for a jump table.
482 if (IsJTAllowed) {
483 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
484 return N;
485 uint64_t Range =
486 (MaxCaseVal - MinCaseVal)
487 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
488 // Check whether a range of clusters is dense enough for a jump table
489 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
490 JumpTableSize = Range;
491 return 1;
492 }
493 }
494 return N;
495 }
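  // Example (assuming the target permits jump tables and its minimum jump
  // table size is not larger than 8): a switch with the eight contiguous
  // cases 0..7 that is not already covered by the bit-test check has
  // Range = 8; if the target considers that dense enough, this returns a
  // single cluster and sets JumpTableSize to 8, otherwise it returns N = 8.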
496
498 const TargetLoweringBase *TLI = getTLI();
499 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
500 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
501 }
502
504 const TargetMachine &TM = getTLI()->getTargetMachine();
505 // If non-PIC mode, do not generate a relative lookup table.
506 if (!TM.isPositionIndependent())
507 return false;
508
509 /// Relative lookup table entries consist of 32-bit offsets.
510 /// Do not generate relative lookup tables for large code models
511 /// in 64-bit architectures where 32-bit offsets might not be enough.
512 if (TM.getCodeModel() == CodeModel::Medium ||
513 TM.getCodeModel() == CodeModel::Large)
514 return false;
515
516 Triple TargetTriple = TM.getTargetTriple();
517 if (!TargetTriple.isArch64Bit())
518 return false;
519
520 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
521 // there.
522 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
523 return false;
524
525 return true;
526 }
527
528 bool haveFastSqrt(Type *Ty) {
529 const TargetLoweringBase *TLI = getTLI();
530 EVT VT = TLI->getValueType(DL, Ty);
531 return TLI->isTypeLegal(VT) &&
533 }
534
536 return true;
537 }
538
540 // Check whether FADD is available, as a proxy for floating-point in
541 // general.
542 const TargetLoweringBase *TLI = getTLI();
543 EVT VT = TLI->getValueType(DL, Ty);
547 }
548
550 const Function &Fn) const {
551 switch (Inst.getOpcode()) {
552 default:
553 break;
554 case Instruction::SDiv:
555 case Instruction::SRem:
556 case Instruction::UDiv:
557 case Instruction::URem: {
558 if (!isa<ConstantInt>(Inst.getOperand(1)))
559 return false;
560 EVT VT = getTLI()->getValueType(DL, Inst.getType());
561 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
562 }
563 };
564
565 return false;
566 }
567
568 unsigned getInliningThresholdMultiplier() const { return 1; }
569 unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
570 unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const {
571 return 0;
572 }
573
574 int getInlinerVectorBonusPercent() const { return 150; }
575
579 // This unrolling functionality is target independent, but to provide some
580 // motivation for its intended use, for x86:
581
582 // According to the Intel 64 and IA-32 Architectures Optimization Reference
583 // Manual, Intel Core models and later have a loop stream detector (and
584 // associated uop queue) that can benefit from partial unrolling.
585 // The relevant requirements are:
586 // - The loop must have no more than 4 (8 for Nehalem and later) branches
587 // taken, and none of them may be calls.
588 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
589
590 // According to the Software Optimization Guide for AMD Family 15h
591 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
592 // and loop buffer which can benefit from partial unrolling.
593 // The relevant requirements are:
594 // - The loop must have fewer than 16 branches
595 // - The loop must have less than 40 uops in all executed loop branches
596
597 // The number of taken branches in a loop is hard to estimate here, and
598 // benchmarking has revealed that it is better not to be conservative when
599 // estimating the branch count. As a result, we'll ignore the branch limits
600 // until someone finds a case where it matters in practice.
601
602 unsigned MaxOps;
603 const TargetSubtargetInfo *ST = getST();
604 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
605   MaxOps = PartialUnrollingThreshold;
606 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
607 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
608 else
609 return;
610
611 // Scan the loop: don't unroll loops with calls.
612 for (BasicBlock *BB : L->blocks()) {
613 for (Instruction &I : *BB) {
614 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
615 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
616 if (!thisT()->isLoweredToCall(F))
617 continue;
618 }
619
620 if (ORE) {
621 ORE->emit([&]() {
622 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
623 L->getHeader())
624 << "advising against unrolling the loop because it "
625 "contains a "
626 << ore::NV("Call", &I);
627 });
628 }
629 return;
630 }
631 }
632 }
633
634 // Enable runtime and partial unrolling up to the specified size.
635 // Enable using trip count upper bound to unroll loops.
636 UP.Partial = UP.Runtime = UP.UpperBound = true;
637 UP.PartialThreshold = MaxOps;
638
639 // Avoid unrolling when optimizing for size.
640 UP.OptSizeThreshold = 0;
642
643 // Set number of instructions optimized when "back edge"
644 // becomes "fall through" to default value of 2.
645 UP.BEInsns = 2;
646 }
647
650 PP.PeelCount = 0;
651 PP.AllowPeeling = true;
652 PP.AllowLoopNestsPeeling = false;
653 PP.PeelProfiledIterations = true;
654 }
655
657 AssumptionCache &AC,
658 TargetLibraryInfo *LibInfo,
659 HardwareLoopInfo &HWLoopInfo) {
660 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
661 }
662
665 }
666
668 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) {
669 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
670 }
671
672 std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
673 IntrinsicInst &II) {
674 return BaseT::instCombineIntrinsic(IC, II);
675 }
676
677 std::optional<Value *>
679 APInt DemandedMask, KnownBits &Known,
680 bool &KnownBitsComputed) {
681 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
682 KnownBitsComputed);
683 }
684
686 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
687 APInt &UndefElts2, APInt &UndefElts3,
688 std::function<void(Instruction *, unsigned, APInt, APInt &)>
689 SimplifyAndSetOp) {
691 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
692 SimplifyAndSetOp);
693 }
694
695 virtual std::optional<unsigned>
697 return std::optional<unsigned>(
698 getST()->getCacheSize(static_cast<unsigned>(Level)));
699 }
700
701 virtual std::optional<unsigned>
703 std::optional<unsigned> TargetResult =
704 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
705
706 if (TargetResult)
707 return TargetResult;
708
709 return BaseT::getCacheAssociativity(Level);
710 }
711
712 virtual unsigned getCacheLineSize() const {
713 return getST()->getCacheLineSize();
714 }
715
716 virtual unsigned getPrefetchDistance() const {
717 return getST()->getPrefetchDistance();
718 }
719
720 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
721 unsigned NumStridedMemAccesses,
722 unsigned NumPrefetches,
723 bool HasCall) const {
724 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
725 NumPrefetches, HasCall);
726 }
727
728 virtual unsigned getMaxPrefetchIterationsAhead() const {
729 return getST()->getMaxPrefetchIterationsAhead();
730 }
731
732 virtual bool enableWritePrefetching() const {
733 return getST()->enableWritePrefetching();
734 }
735
736 virtual bool shouldPrefetchAddressSpace(unsigned AS) const {
737 return getST()->shouldPrefetchAddressSpace(AS);
738 }
739
740 /// @}
741
742 /// \name Vector TTI Implementations
743 /// @{
744
746 return TypeSize::getFixed(32);
747 }
748
749 std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
750 std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
751 bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
752
753 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
754 /// are set if the demanded result elements need to be inserted and/or
755 /// extracted from vectors.
757 const APInt &DemandedElts,
758 bool Insert, bool Extract,
760 /// FIXME: a bitfield is not a reasonable abstraction for talking about
761 /// which elements are needed from a scalable vector
762 if (isa<ScalableVectorType>(InTy))
763   return InstructionCost::getInvalid();
764 auto *Ty = cast<FixedVectorType>(InTy);
765
766 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
767 "Vector size mismatch");
768
769 InstructionCost Cost = 0;
770
771 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
772 if (!DemandedElts[i])
773 continue;
774 if (Insert)
775 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
776 CostKind, i, nullptr, nullptr);
777 if (Extract)
778 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
779 CostKind, i, nullptr, nullptr);
780 }
781
782 return Cost;
783 }
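  // For example, with Ty = <4 x i32>, DemandedElts = 0b0101 and both Insert
  // and Extract set, and assuming unit per-element costs, the estimate is
  // 2 inserts + 2 extracts = 4.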
784
785 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
787 bool Extract,
789 if (isa<ScalableVectorType>(InTy))
790   return InstructionCost::getInvalid();
791 auto *Ty = cast<FixedVectorType>(InTy);
792
793 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
794 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
795 CostKind);
796 }
797
798 /// Estimate the overhead of scalarizing an instruction's unique
799 /// non-constant operands. The (potentially vector) types to use for each
800 /// argument are passed via Tys.
805 assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
806
808 SmallPtrSet<const Value*, 4> UniqueOperands;
809 for (int I = 0, E = Args.size(); I != E; I++) {
810 // Disregard things like metadata arguments.
811 const Value *A = Args[I];
812 Type *Ty = Tys[I];
813 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
814 !Ty->isPtrOrPtrVectorTy())
815 continue;
816
817 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
818 if (auto *VecTy = dyn_cast<VectorType>(Ty))
819 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
820 /*Extract*/ true, CostKind);
821 }
822 }
823
824 return Cost;
825 }
826
827 /// Estimate the overhead of scalarizing the inputs and outputs of an
828 /// instruction, with return type RetTy and arguments Args of type Tys. If
829 /// Args are unknown (empty), then the cost associated with one argument is
830 /// added as a heuristic.
836 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
837 if (!Args.empty())
839 else
840 // When no information on arguments is provided, we add the cost
841 // associated with one argument as a heuristic.
842 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
843 /*Extract*/ true, CostKind);
844
845 return Cost;
846 }
847
848 /// Estimate the cost of type-legalization and the legalized type.
849 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
850 LLVMContext &C = Ty->getContext();
851 EVT MTy = getTLI()->getValueType(DL, Ty);
852
854 // We keep legalizing the type until we find a legal kind. We assume that
855 // the only operation that costs anything is the split. After splitting
856 // we need to handle two types.
857 while (true) {
859
861 // Ensure we return a sensible simple VT here, since many callers of
862 // this function require it.
863 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
864 return std::make_pair(InstructionCost::getInvalid(), VT);
865 }
866
867 if (LK.first == TargetLoweringBase::TypeLegal)
868 return std::make_pair(Cost, MTy.getSimpleVT());
869
870 if (LK.first == TargetLoweringBase::TypeSplitVector ||
872 Cost *= 2;
873
874 // Do not loop with f128 type.
875 if (MTy == LK.second)
876 return std::make_pair(Cost, MTy.getSimpleVT());
877
878 // Keep legalizing the type.
879 MTy = LK.second;
880 }
881 }
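  // Sketch: on a target whose widest legal vector type is v4i32, a <16 x i32>
  // IR type is split twice (16 -> 8 -> 4 elements), so this returns a cost
  // factor of 4 paired with MVT::v4i32.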
882
883 unsigned getMaxInterleaveFactor(ElementCount VF) { return 1; }
884
886 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
889 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
890 const Instruction *CxtI = nullptr) {
891 // Check if any of the operands are vector operands.
892 const TargetLoweringBase *TLI = getTLI();
893 int ISD = TLI->InstructionOpcodeToISD(Opcode);
894 assert(ISD && "Invalid opcode");
895
896 // TODO: Handle more cost kinds.
898 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
899 Opd1Info, Opd2Info,
900 Args, CxtI);
901
902 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
903
904 bool IsFloat = Ty->isFPOrFPVectorTy();
905 // Assume that floating point arithmetic operations cost twice as much as
906 // integer operations.
907 InstructionCost OpCost = (IsFloat ? 2 : 1);
908
909 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
910 // The operation is legal. Assume it costs 1.
911 // TODO: Once we have extract/insert subvector cost we need to use them.
912 return LT.first * OpCost;
913 }
914
915 if (!TLI->isOperationExpand(ISD, LT.second)) {
916 // If the operation is custom lowered, then assume that the code is twice
917 // as expensive.
918 return LT.first * 2 * OpCost;
919 }
920
921 // An 'Expand' of URem and SRem is special because it may default
922 // to expanding the operation into a sequence of sub-operations
923 // i.e. X % Y -> X-(X/Y)*Y.
924 if (ISD == ISD::UREM || ISD == ISD::SREM) {
925 bool IsSigned = ISD == ISD::SREM;
926 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
927 LT.second) ||
928 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
929 LT.second)) {
930 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
931 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
932 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
933 InstructionCost MulCost =
934 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
935 InstructionCost SubCost =
936 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
937 return DivCost + MulCost + SubCost;
938 }
939 }
940
941 // We cannot scalarize scalable vectors, so return Invalid.
942 if (isa<ScalableVectorType>(Ty))
943   return InstructionCost::getInvalid();
944
945 // Else, assume that we need to scalarize this op.
946 // TODO: If one of the types get legalized by splitting, handle this
947 // similarly to what getCastInstrCost() does.
948 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
949 InstructionCost Cost = thisT()->getArithmeticInstrCost(
950 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
951 Args, CxtI);
952 // Return the cost of multiple scalar invocations plus the cost of
953 // inserting and extracting the values.
954 SmallVector<Type *> Tys(Args.size(), Ty);
955 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
956 VTy->getNumElements() * Cost;
957 }
958
959 // We don't know anything about this scalar instruction.
960 return OpCost;
961 }
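  // Sketch of the scalarization fallback above, assuming unit costs: a
  // <4 x i32> operation whose legalized type must be expanded is estimated
  // as the insert/extract overhead of its operands and result plus
  // 4 * (cost of the scalar opcode).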
962
964 ArrayRef<int> Mask,
965 VectorType *Ty, int &Index,
966 VectorType *&SubTy) const {
967 if (Mask.empty())
968 return Kind;
969 int NumSrcElts = Ty->getElementCount().getKnownMinValue();
970 switch (Kind) {
972 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
973 return TTI::SK_Reverse;
974 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
975 return TTI::SK_Broadcast;
976 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
977 (Index + Mask.size()) <= (size_t)NumSrcElts) {
978 SubTy = FixedVectorType::get(Ty->getElementType(), Mask.size());
980 }
981 break;
983 int NumSubElts;
984 if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask(
985 Mask, NumSrcElts, NumSubElts, Index)) {
986 if (Index + NumSubElts > NumSrcElts)
987 return Kind;
988 SubTy = FixedVectorType::get(Ty->getElementType(), NumSubElts);
990 }
991 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
992 return TTI::SK_Select;
993 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
994 return TTI::SK_Transpose;
995 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
996 return TTI::SK_Splice;
997 break;
998 }
999 case TTI::SK_Select:
1000 case TTI::SK_Reverse:
1001 case TTI::SK_Broadcast:
1002 case TTI::SK_Transpose:
1005 case TTI::SK_Splice:
1006 break;
1007 }
1008 return Kind;
1009 }
1010
1012 ArrayRef<int> Mask,
1014 VectorType *SubTp,
1015 ArrayRef<const Value *> Args = std::nullopt) {
1016 switch (improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp)) {
1017 case TTI::SK_Broadcast:
1018 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
1019 return getBroadcastShuffleOverhead(FVT, CostKind);
1021 case TTI::SK_Select:
1022 case TTI::SK_Splice:
1023 case TTI::SK_Reverse:
1024 case TTI::SK_Transpose:
1027 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
1028 return getPermuteShuffleOverhead(FVT, CostKind);
1031 return getExtractSubvectorOverhead(Tp, CostKind, Index,
1032 cast<FixedVectorType>(SubTp));
1034 return getInsertSubvectorOverhead(Tp, CostKind, Index,
1035 cast<FixedVectorType>(SubTp));
1036 }
1037 llvm_unreachable("Unknown TTI::ShuffleKind");
1038 }
1039
1040 InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1043 const Instruction *I = nullptr) {
1044 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1045 return 0;
1046
1047 const TargetLoweringBase *TLI = getTLI();
1048 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1049 assert(ISD && "Invalid opcode");
1050 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1051 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1052
1053 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1054 TypeSize DstSize = DstLT.second.getSizeInBits();
1055 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1056 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1057
1058 switch (Opcode) {
1059 default:
1060 break;
1061 case Instruction::Trunc:
1062 // Check for NOOP conversions.
1063 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1064 return 0;
1065 [[fallthrough]];
1066 case Instruction::BitCast:
1067 // Bitcast between types that are legalized to the same type are free and
1068 // assume int to/from ptr of the same size is also free.
1069 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1070 SrcSize == DstSize)
1071 return 0;
1072 break;
1073 case Instruction::FPExt:
1074 if (I && getTLI()->isExtFree(I))
1075 return 0;
1076 break;
1077 case Instruction::ZExt:
1078 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1079 return 0;
1080 [[fallthrough]];
1081 case Instruction::SExt:
1082 if (I && getTLI()->isExtFree(I))
1083 return 0;
1084
1085 // If this is a zext/sext of a load, return 0 if the corresponding
1086 // extending load exists on target and the result type is legal.
1087 if (CCH == TTI::CastContextHint::Normal) {
1088 EVT ExtVT = EVT::getEVT(Dst);
1089 EVT LoadVT = EVT::getEVT(Src);
1090 unsigned LType =
1091 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1092 if (DstLT.first == SrcLT.first &&
1093 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1094 return 0;
1095 }
1096 break;
1097 case Instruction::AddrSpaceCast:
1098 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1099 Dst->getPointerAddressSpace()))
1100 return 0;
1101 break;
1102 }
1103
1104 auto *SrcVTy = dyn_cast<VectorType>(Src);
1105 auto *DstVTy = dyn_cast<VectorType>(Dst);
1106
1107 // If the cast is marked as legal (or promote) then assume low cost.
1108 if (SrcLT.first == DstLT.first &&
1109 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1110 return SrcLT.first;
1111
1112 // Handle scalar conversions.
1113 if (!SrcVTy && !DstVTy) {
1114 // Just check the op cost. If the operation is legal then assume it costs
1115 // 1.
1116 if (!TLI->isOperationExpand(ISD, DstLT.second))
1117 return 1;
1118
1119 // Assume that illegal scalar instructions are expensive.
1120 return 4;
1121 }
1122
1123 // Check vector-to-vector casts.
1124 if (DstVTy && SrcVTy) {
1125 // If the cast is between same-sized registers, then the check is simple.
1126 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1127
1128 // Assume that Zext is done using AND.
1129 if (Opcode == Instruction::ZExt)
1130 return SrcLT.first;
1131
1132 // Assume that sext is done using SHL and SRA.
1133 if (Opcode == Instruction::SExt)
1134 return SrcLT.first * 2;
1135
1136 // Just check the op cost. If the operation is legal then assume it
1137 // costs 1 and multiply by the type-legalization overhead.
1139 if (!TLI->isOperationExpand(ISD, DstLT.second))
1140 return SrcLT.first * 1;
1141 }
1142
1143 // If we are legalizing by splitting, query the concrete TTI for the cost
1144 // of casting the original vector twice. We also need to factor in the
1145 // cost of the split itself. Count that as 1, to be consistent with
1146 // getTypeLegalizationCost().
1147 bool SplitSrc =
1148 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1150 bool SplitDst =
1151 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1153 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1154 DstVTy->getElementCount().isVector()) {
1155 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1156 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1157 T *TTI = static_cast<T *>(this);
1158 // If both types need to be split then the split is free.
1159 InstructionCost SplitCost =
1160 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1161 return SplitCost +
1162 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1163 CostKind, I));
1164 }
1165
1166 // Scalarization cost is Invalid, can't assume any num elements.
1167 if (isa<ScalableVectorType>(DstVTy))
1168   return InstructionCost::getInvalid();
1169
1170 // In other cases where the source or destination are illegal, assume
1171 // the operation will get scalarized.
1172 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1173 InstructionCost Cost = thisT()->getCastInstrCost(
1174 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1175
1176 // Return the cost of multiple scalar invocations plus the cost of
1177 // inserting and extracting the values.
1178 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1179 CostKind) +
1180 Num * Cost;
1181 }
1182
1183 // We already handled vector-to-vector and scalar-to-scalar conversions.
1184 // This is where we handle bitcasts between vectors and scalars. We need to
1185 // assume that the conversion is scalarized in one way or another.
1187 if (Opcode == Instruction::BitCast) {
1188 // Illegal bitcasts are done by storing and loading from a stack slot.
1189 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1190 /*Extract*/ true, CostKind)
1191 : 0) +
1192 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1193 /*Extract*/ false, CostKind)
1194 : 0);
1195 }
1196
1197 llvm_unreachable("Unhandled cast");
1198 }
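  // Example for the vector split path above: if both the source and
  // destination types are legalized by splitting, the split itself is
  // counted as free and the result is twice the cost of casting the
  // half-width vectors; if only one side is split, one getVectorSplitCost()
  // is added on top.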
1199
1201 VectorType *VecTy, unsigned Index) {
1203 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1204 CostKind, Index, nullptr, nullptr) +
1205 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1207 }
1208
1210 const Instruction *I = nullptr) {
1211 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1212 }
1213
1214 InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1215 CmpInst::Predicate VecPred,
1217 const Instruction *I = nullptr) {
1218 const TargetLoweringBase *TLI = getTLI();
1219 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1220 assert(ISD && "Invalid opcode");
1221
1222 // TODO: Handle other cost kinds.
1224 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1225 I);
1226
1227 // Selects on vectors are actually vector selects.
1228 if (ISD == ISD::SELECT) {
1229 assert(CondTy && "CondTy must exist");
1230 if (CondTy->isVectorTy())
1231 ISD = ISD::VSELECT;
1232 }
1233 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1234
1235 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1236 !TLI->isOperationExpand(ISD, LT.second)) {
1237 // The operation is legal. Assume it costs 1. Multiply
1238 // by the type-legalization overhead.
1239 return LT.first * 1;
1240 }
1241
1242 // Otherwise, assume that the operation is scalarized.
1243 // TODO: If one of the types get legalized by splitting, handle this
1244 // similarly to what getCastInstrCost() does.
1245 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1246 if (isa<ScalableVectorType>(ValTy))
1247   return InstructionCost::getInvalid();
1248
1249 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1250 if (CondTy)
1251 CondTy = CondTy->getScalarType();
1252 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1253 Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
1254
1255 // Return the cost of multiple scalar invocations plus the cost of
1256 // inserting and extracting the values.
1257 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1258 /*Extract*/ false, CostKind) +
1259 Num * Cost;
1260 }
1261
1262 // Unknown scalar opcode.
1263 return 1;
1264 }
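  // E.g. for a <4 x i32> compare whose legalized type is not directly
  // supported, the fallback above is roughly 4 scalar compares plus the
  // overhead of inserting the 4 results back into a vector.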
1265
1268 unsigned Index, Value *Op0, Value *Op1) {
1269 return getRegUsageForType(Val->getScalarType());
1270 }
1271
1274 unsigned Index) {
1275 Value *Op0 = nullptr;
1276 Value *Op1 = nullptr;
1277 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1278 Op0 = IE->getOperand(0);
1279 Op1 = IE->getOperand(1);
1280 }
1281 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1282 Op1);
1283 }
1284
1285 InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
1286 int VF,
1287 const APInt &DemandedDstElts,
1289 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1290 "Unexpected size of DemandedDstElts.");
1291
1293
1294 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1295 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1296
1297 // The Mask shuffling cost is to extract all the elements of the Mask
1298 // and insert each of them Factor times into the wide vector:
1299 //
1300 // E.g. an interleaved group with factor 3:
1301 // %mask = icmp ult <8 x i32> %vec1, %vec2
1302 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1303 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1304 // The cost is estimated as extract all mask elements from the <8xi1> mask
1305 // vector and insert them factor times into the <24xi1> shuffled mask
1306 // vector.
1307 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1308 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1309 /*Insert*/ false,
1310 /*Extract*/ true, CostKind);
1311 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1312 /*Insert*/ true,
1313 /*Extract*/ false, CostKind);
1314
1315 return Cost;
1316 }
1317
1319 getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
1322 const Instruction *I = nullptr) {
1323 assert(!Src->isVoidTy() && "Invalid type");
1324 // Assume types, such as structs, are expensive.
1325 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1326 return 4;
1327 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1328
1329 // Assuming that all loads of legal types cost 1.
1330 InstructionCost Cost = LT.first;
1332 return Cost;
1333
1334 const DataLayout &DL = this->getDataLayout();
1335 if (Src->isVectorTy() &&
1336 // In practice it's not currently possible to have a change in lane
1337 // length for extending loads or truncating stores so both types should
1338 // have the same scalable property.
1340 LT.second.getSizeInBits())) {
1341 // This is a vector load that legalizes to a larger type than the vector
1342 // itself. Unless the corresponding extending load or truncating store is
1343 // legal, then this will scalarize.
1345 EVT MemVT = getTLI()->getValueType(DL, Src);
1346 if (Opcode == Instruction::Store)
1347 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1348 else
1349 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1350
1351 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1352 // This is a vector load/store for some illegal type that is scalarized.
1353 // We must account for the cost of building or decomposing the vector.
1355 cast<VectorType>(Src), Opcode != Instruction::Store,
1356 Opcode == Instruction::Store, CostKind);
1357 }
1358 }
1359
1360 return Cost;
1361 }
1362
1364 Align Alignment, unsigned AddressSpace,
1366 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1367 CostKind);
1368 }
1369
1371 const Value *Ptr, bool VariableMask,
1372 Align Alignment,
1374 const Instruction *I = nullptr) {
1375 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1376 true, CostKind);
1377 }
1378
1380 const Value *Ptr, bool VariableMask,
1381 Align Alignment,
1383 const Instruction *I) {
1384 // For a target without strided memory operations (or for an illegal
1385 // operation type on one which does), assume we lower to a gather/scatter
1386 // operation. (Which may in turn be scalarized.)
1387 return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1388 Alignment, CostKind, I);
1389 }
1390
1392 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1393 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1394 bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1395
1396 // We cannot scalarize scalable vectors, so return Invalid.
1397 if (isa<ScalableVectorType>(VecTy))
1398   return InstructionCost::getInvalid();
1399
1400 auto *VT = cast<FixedVectorType>(VecTy);
1401
1402 unsigned NumElts = VT->getNumElements();
1403 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1404
1405 unsigned NumSubElts = NumElts / Factor;
1406 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1407
1408 // Firstly, the cost of load/store operation.
1410 if (UseMaskForCond || UseMaskForGaps)
1411 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1413 else
1414 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1415 CostKind);
1416
1417 // Legalize the vector type, and get the legalized and unlegalized type
1418 // sizes.
1419 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1420 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1421 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1422
1423 // Scale the cost of the memory operation by the fraction of legalized
1424 // instructions that will actually be used. We shouldn't account for the
1425 // cost of dead instructions since they will be removed.
1426 //
1427 // E.g., An interleaved load of factor 8:
1428 // %vec = load <16 x i64>, <16 x i64>* %ptr
1429 // %v0 = shufflevector %vec, undef, <0, 8>
1430 //
1431 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1432 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1433 // type). The other loads are unused.
1434 //
1435 // TODO: Note that legalization can turn masked loads/stores into unmasked
1436 // (legalized) loads/stores. This can be reflected in the cost.
1437 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1438 // The number of loads of a legal type it will take to represent a load
1439 // of the unlegalized vector type.
1440 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1441
1442 // The number of elements of the unlegalized type that correspond to a
1443 // single legal instruction.
1444 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1445
1446 // Determine which legal instructions will be used.
1447 BitVector UsedInsts(NumLegalInsts, false);
1448 for (unsigned Index : Indices)
1449 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1450 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1451
1452 // Scale the cost of the load by the fraction of legal instructions that
1453 // will be used.
1454 Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
1455 }
1456
1457 // Then plus the cost of interleave operation.
1458 assert(Indices.size() <= Factor &&
1459 "Interleaved memory op has too many members");
1460
1461 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1462 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1463
1464 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1465 for (unsigned Index : Indices) {
1466 assert(Index < Factor && "Invalid index for interleaved memory op");
1467 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1468 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1469 }
1470
1471 if (Opcode == Instruction::Load) {
1472 // The interleave cost is similar to extracting the sub-vectors' elements
1473 // from the wide vector and inserting them into the sub-vectors.
1474 //
1475 // E.g. An interleaved load of factor 2 (with one member of index 0):
1476 // %vec = load <8 x i32>, <8 x i32>* %ptr
1477 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1478 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1479 // <8 x i32> vector and insert them into a <4 x i32> vector.
1480 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1481 SubVT, DemandedAllSubElts,
1482 /*Insert*/ true, /*Extract*/ false, CostKind);
1483 Cost += Indices.size() * InsSubCost;
1484 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1485 /*Insert*/ false,
1486 /*Extract*/ true, CostKind);
1487 } else {
1488 // The interleave cost is to extract elements from the sub-vectors and
1489 // insert them into the wide vector.
1490 //
1491 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1492 // (using VF=4):
1493 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1494 // %gaps.mask = <true, true, false, true, true, false,
1495 // true, true, false, true, true, false>
1496 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1497 // i32 Align, <12 x i1> %gaps.mask
1498 // The cost is estimated as extract all elements (of actual members,
1499 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1500 // i32> vector.
1501 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1502 SubVT, DemandedAllSubElts,
1503 /*Insert*/ false, /*Extract*/ true, CostKind);
1504 Cost += ExtSubCost * Indices.size();
1505 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1506 /*Insert*/ true,
1507 /*Extract*/ false, CostKind);
1508 }
1509
1510 if (!UseMaskForCond)
1511 return Cost;
1512
1513 Type *I8Type = Type::getInt8Ty(VT->getContext());
1514
1515 Cost += thisT()->getReplicationShuffleCost(
1516 I8Type, Factor, NumSubElts,
1517 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1518 CostKind);
1519
1520 // The Gaps mask is invariant and created outside the loop, therefore the
1521 // cost of creating it is not accounted for here. However if we have both
1522 // a MaskForGaps and some other mask that guards the execution of the
1523 // memory access, we need to account for the cost of And-ing the two masks
1524 // inside the loop.
1525 if (UseMaskForGaps) {
1526 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1527 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1528 CostKind);
1529 }
1530
1531 return Cost;
1532 }
1533
1534 /// Get intrinsic cost based on arguments.
1537 // Check for generically free intrinsics.
1539 return 0;
1540
1541 // Assume that target intrinsics are cheap.
1542 Intrinsic::ID IID = ICA.getID();
1545
1546 if (ICA.isTypeBasedOnly())
1548
1549 Type *RetTy = ICA.getReturnType();
1550
1551 ElementCount RetVF =
1552 (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
1554 const IntrinsicInst *I = ICA.getInst();
1555 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1556 FastMathFlags FMF = ICA.getFlags();
1557 switch (IID) {
1558 default:
1559 break;
1560
1561 case Intrinsic::powi:
1562 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1563 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1564 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1565 ShouldOptForSize)) {
1566 // The cost is modeled on the expansion performed by ExpandPowI in
1567 // SelectionDAGBuilder.
1568 APInt Exponent = RHSC->getValue().abs();
1569 unsigned ActiveBits = Exponent.getActiveBits();
1570 unsigned PopCount = Exponent.popcount();
1571 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1572 thisT()->getArithmeticInstrCost(
1573 Instruction::FMul, RetTy, CostKind);
1574 if (RHSC->isNegative())
1575 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1576 CostKind);
1577 return Cost;
1578 }
1579 }
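      // Worked example of the expansion cost modeled above: powi(x, 7) has
      // Exponent = 7 (0b111), so ActiveBits = 3 and PopCount = 3, giving
      // (3 + 3 - 2) = 4 fmuls; a negative exponent adds one fdiv.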
1580 break;
1581 case Intrinsic::cttz:
1582 // FIXME: If necessary, this should go in target-specific overrides.
1583 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1585 break;
1586
1587 case Intrinsic::ctlz:
1588 // FIXME: If necessary, this should go in target-specific overrides.
1589 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1591 break;
1592
1593 case Intrinsic::memcpy:
1594 return thisT()->getMemcpyCost(ICA.getInst());
1595
1596 case Intrinsic::masked_scatter: {
1597 const Value *Mask = Args[3];
1598 bool VarMask = !isa<Constant>(Mask);
1599 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
1600 return thisT()->getGatherScatterOpCost(Instruction::Store,
1601 ICA.getArgTypes()[0], Args[1],
1602 VarMask, Alignment, CostKind, I);
1603 }
1604 case Intrinsic::masked_gather: {
1605 const Value *Mask = Args[2];
1606 bool VarMask = !isa<Constant>(Mask);
1607 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
1608 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1609 VarMask, Alignment, CostKind, I);
1610 }
1611 case Intrinsic::experimental_vp_strided_store: {
1612 const Value *Data = Args[0];
1613 const Value *Ptr = Args[1];
1614 const Value *Mask = Args[3];
1615 const Value *EVL = Args[4];
1616 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1617 Align Alignment = I->getParamAlign(1).valueOrOne();
1618 return thisT()->getStridedMemoryOpCost(Instruction::Store,
1619 Data->getType(), Ptr, VarMask,
1620 Alignment, CostKind, I);
1621 }
1622 case Intrinsic::experimental_vp_strided_load: {
1623 const Value *Ptr = Args[0];
1624 const Value *Mask = Args[2];
1625 const Value *EVL = Args[3];
1626 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1627 Align Alignment = I->getParamAlign(0).valueOrOne();
1628 return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
1629 VarMask, Alignment, CostKind, I);
1630 }
1631 case Intrinsic::experimental_stepvector: {
1632 if (isa<ScalableVectorType>(RetTy))
1634 // The cost of materialising a constant integer vector.
1636 }
1637 case Intrinsic::vector_extract: {
1638 // FIXME: Handle case where a scalable vector is extracted from a scalable
1639 // vector
1640 if (isa<ScalableVectorType>(RetTy))
1642 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1643 return thisT()->getShuffleCost(
1644 TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
1645 std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
1646 }
1647 case Intrinsic::vector_insert: {
1648 // FIXME: Handle case where a scalable vector is inserted into a scalable
1649 // vector
1650 if (isa<ScalableVectorType>(Args[1]->getType()))
1652 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1653 return thisT()->getShuffleCost(
1654 TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
1655 std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
1656 }
1657 case Intrinsic::experimental_vector_reverse: {
1658 return thisT()->getShuffleCost(
1659 TTI::SK_Reverse, cast<VectorType>(Args[0]->getType()), std::nullopt,
1660 CostKind, 0, cast<VectorType>(RetTy));
1661 }
1662 case Intrinsic::experimental_vector_splice: {
1663 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1664 return thisT()->getShuffleCost(
1665 TTI::SK_Splice, cast<VectorType>(Args[0]->getType()), std::nullopt,
1666 CostKind, Index, cast<VectorType>(RetTy));
1667 }
1668 case Intrinsic::vector_reduce_add:
1669 case Intrinsic::vector_reduce_mul:
1670 case Intrinsic::vector_reduce_and:
1671 case Intrinsic::vector_reduce_or:
1672 case Intrinsic::vector_reduce_xor:
1673 case Intrinsic::vector_reduce_smax:
1674 case Intrinsic::vector_reduce_smin:
1675 case Intrinsic::vector_reduce_fmax:
1676 case Intrinsic::vector_reduce_fmin:
1677 case Intrinsic::vector_reduce_fmaximum:
1678 case Intrinsic::vector_reduce_fminimum:
1679 case Intrinsic::vector_reduce_umax:
1680 case Intrinsic::vector_reduce_umin: {
1681 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
1683 }
1684 case Intrinsic::vector_reduce_fadd:
1685 case Intrinsic::vector_reduce_fmul: {
1687 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
1689 }
1690 case Intrinsic::fshl:
1691 case Intrinsic::fshr: {
1692 const Value *X = Args[0];
1693 const Value *Y = Args[1];
1694 const Value *Z = Args[2];
1697 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
1698 const TTI::OperandValueInfo OpInfoBW =
1700 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
1701 : TTI::OP_None};
1702
1703 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
1704 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
1706 Cost +=
1707 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
1708 Cost +=
1709 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
1710 Cost += thisT()->getArithmeticInstrCost(
1711 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
1712 {OpInfoZ.Kind, TTI::OP_None});
1713 Cost += thisT()->getArithmeticInstrCost(
1714 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
1715 {OpInfoZ.Kind, TTI::OP_None});
1716 // Non-constant shift amounts require a modulo.
1717 if (!OpInfoZ.isConstant())
1718 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
1719 CostKind, OpInfoZ, OpInfoBW);
1720 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
1721 if (X != Y) {
1722 Type *CondTy = RetTy->getWithNewBitWidth(1);
1723 Cost +=
1724 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1726 Cost +=
1727 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1729 }
1730 return Cost;
1731 }
1732 case Intrinsic::get_active_lane_mask: {
1733 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
1734 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
1735
1736 // If we're not expanding the intrinsic then we assume this is cheap
1737 // to implement.
1738 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
1739 return getTypeLegalizationCost(RetTy).first;
1740 }
1741
1742 // Create the expanded types that will be used to calculate the uadd_sat
1743 // operation.
1744 Type *ExpRetTy = VectorType::get(
1745 ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
1746 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
1747 InstructionCost Cost =
1748 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1749 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
1750 CmpInst::ICMP_ULT, CostKind);
1751 return Cost;
1752 }
1753 }
1754
1755 // VP Intrinsics should have the same cost as their non-vp counterpart.
1756 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1757 // counterpart when the vector length argument is smaller than the maximum
1758 // vector length.
1759 // TODO: Support other kinds of VPIntrinsics
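 // Illustrative example: a vp.fadd on <4 x float> is costed like a plain
 // fadd on <4 x float>, while vp.load / vp.store fall through to the
 // ordinary memory-op costs handled just below.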
1760 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1761 std::optional<unsigned> FOp =
1762 VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
1763 if (FOp) {
1764 if (ICA.getID() == Intrinsic::vp_load) {
1765 Align Alignment;
1766 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1767 Alignment = VPI->getPointerAlignment().valueOrOne();
1768 unsigned AS = 0;
1769 if (ICA.getArgs().size() > 1)
1770 if (auto *PtrTy =
1771 dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
1772 AS = PtrTy->getAddressSpace();
1773 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1774 AS, CostKind);
1775 }
1776 if (ICA.getID() == Intrinsic::vp_store) {
1777 Align Alignment;
1778 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1779 Alignment = VPI->getPointerAlignment().valueOrOne();
1780 unsigned AS = 0;
1781 if (ICA.getArgs().size() >= 2)
1782 if (auto *PtrTy =
1783 dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
1784 AS = PtrTy->getAddressSpace();
1785 return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
1786 AS, CostKind);
1787 }
1788 if (VPBinOpIntrinsic::isVPBinOp(ICA.getID())) {
1789 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1790 CostKind);
1791 }
1792 }
1793
1794 std::optional<Intrinsic::ID> FID =
1795 VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
1796 if (FID) {
1797 // The non-vp version will have the same Args/Tys except the mask and vector length.
1798 assert(ICA.getArgs().size() >= 2 && ICA.getArgTypes().size() >= 2 &&
1799 "Expected VPIntrinsic to have Mask and Vector Length args and "
1800 "types");
1802
1803 // VPReduction intrinsics have a start value argument that their non-vp
1804 // counterparts do not have, except for the fadd and fmul non-vp
1805 // counterpart.
1806 if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
1807 *FID != Intrinsic::vector_reduce_fadd &&
1808 *FID != Intrinsic::vector_reduce_fmul)
1809 NewTys = NewTys.drop_front();
1810
1811 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
1812 ICA.getFlags());
1813 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1814 }
1815 }
1816
1817 // Assume that we need to scalarize this intrinsic.
1818 // Compute the scalarization overhead based on Args for a vector
1819 // intrinsic.
1820 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
1821 if (RetVF.isVector() && !RetVF.isScalable()) {
1822 ScalarizationCost = 0;
1823 if (!RetTy->isVoidTy())
1824 ScalarizationCost += getScalarizationOverhead(
1825 cast<VectorType>(RetTy),
1826 /*Insert*/ true, /*Extract*/ false, CostKind);
1827 ScalarizationCost +=
1828 getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
1829 }
1830
1831 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
1832 ScalarizationCost);
1833 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1834 }
1835
1836 /// Get intrinsic cost based on argument types.
1837 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
1838 /// cost of scalarizing the arguments and the return value will be computed
1839 /// based on types.
1840 InstructionCost
1841 getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1842 TTI::TargetCostKind CostKind) {
1843 Intrinsic::ID IID = ICA.getID();
1844 Type *RetTy = ICA.getReturnType();
1845 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
1846 FastMathFlags FMF = ICA.getFlags();
1847 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
1848 bool SkipScalarizationCost = ICA.skipScalarizationCost();
1849
1850 VectorType *VecOpTy = nullptr;
1851 if (!Tys.empty()) {
1852 // The vector reduction operand is operand 0 except for fadd/fmul.
1853 // Their operand 0 is a scalar start value, so the vector op is operand 1.
1854 unsigned VecTyIndex = 0;
1855 if (IID == Intrinsic::vector_reduce_fadd ||
1856 IID == Intrinsic::vector_reduce_fmul)
1857 VecTyIndex = 1;
1858 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
1859 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
1860 }
1861
1862 // Library call cost - other than size, make it expensive.
1863 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
1864 unsigned ISD = 0;
1865 switch (IID) {
1866 default: {
1867 // Scalable vectors cannot be scalarized, so return Invalid.
1868 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1869 return isa<ScalableVectorType>(Ty);
1870 }))
1871 return InstructionCost::getInvalid();
1872 
1873 // Assume that we need to scalarize this intrinsic.
1874 InstructionCost ScalarizationCost =
1875 SkipScalarizationCost ? ScalarizationCostPassed : 0;
1876 unsigned ScalarCalls = 1;
1877 Type *ScalarRetTy = RetTy;
1878 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1879 if (!SkipScalarizationCost)
1880 ScalarizationCost = getScalarizationOverhead(
1881 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
1882 ScalarCalls = std::max(ScalarCalls,
1883 cast<FixedVectorType>(RetVTy)->getNumElements());
1884 ScalarRetTy = RetTy->getScalarType();
1885 }
1886 SmallVector<Type *, 4> ScalarTys;
1887 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1888 Type *Ty = Tys[i];
1889 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1890 if (!SkipScalarizationCost)
1891 ScalarizationCost += getScalarizationOverhead(
1892 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
1893 ScalarCalls = std::max(ScalarCalls,
1894 cast<FixedVectorType>(VTy)->getNumElements());
1895 Ty = Ty->getScalarType();
1896 }
1897 ScalarTys.push_back(Ty);
1898 }
1899 if (ScalarCalls == 1)
1900 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
1901
1902 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
1903 InstructionCost ScalarCost =
1904 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
1905
1906 return ScalarCalls * ScalarCost + ScalarizationCost;
1907 }
1908 // Look for intrinsics that can be lowered directly or turned into a scalar
1909 // intrinsic call.
1910 case Intrinsic::sqrt:
1911 ISD = ISD::FSQRT;
1912 break;
1913 case Intrinsic::sin:
1914 ISD = ISD::FSIN;
1915 break;
1916 case Intrinsic::cos:
1917 ISD = ISD::FCOS;
1918 break;
1919 case Intrinsic::exp:
1920 ISD = ISD::FEXP;
1921 break;
1922 case Intrinsic::exp2:
1923 ISD = ISD::FEXP2;
1924 break;
1925 case Intrinsic::exp10:
1926 ISD = ISD::FEXP10;
1927 break;
1928 case Intrinsic::log:
1929 ISD = ISD::FLOG;
1930 break;
1931 case Intrinsic::log10:
1932 ISD = ISD::FLOG10;
1933 break;
1934 case Intrinsic::log2:
1935 ISD = ISD::FLOG2;
1936 break;
1937 case Intrinsic::fabs:
1938 ISD = ISD::FABS;
1939 break;
1940 case Intrinsic::canonicalize:
1941 ISD = ISD::FCANONICALIZE;
1942 break;
1943 case Intrinsic::minnum:
1944 ISD = ISD::FMINNUM;
1945 break;
1946 case Intrinsic::maxnum:
1947 ISD = ISD::FMAXNUM;
1948 break;
1949 case Intrinsic::minimum:
1950 ISD = ISD::FMINIMUM;
1951 break;
1952 case Intrinsic::maximum:
1953 ISD = ISD::FMAXIMUM;
1954 break;
1955 case Intrinsic::copysign:
1956 ISD = ISD::FCOPYSIGN;
1957 break;
1958 case Intrinsic::floor:
1959 ISD = ISD::FFLOOR;
1960 break;
1961 case Intrinsic::ceil:
1962 ISD = ISD::FCEIL;
1963 break;
1964 case Intrinsic::trunc:
1965 ISD = ISD::FTRUNC;
1966 break;
1967 case Intrinsic::nearbyint:
1968 ISD = ISD::FNEARBYINT;
1969 break;
1970 case Intrinsic::rint:
1971 ISD = ISD::FRINT;
1972 break;
1973 case Intrinsic::lrint:
1974 ISD = ISD::LRINT;
1975 break;
1976 case Intrinsic::llrint:
1977 ISD = ISD::LLRINT;
1978 break;
1979 case Intrinsic::round:
1980 ISD = ISD::FROUND;
1981 break;
1982 case Intrinsic::roundeven:
1983 ISD = ISD::FROUNDEVEN;
1984 break;
1985 case Intrinsic::pow:
1986 ISD = ISD::FPOW;
1987 break;
1988 case Intrinsic::fma:
1989 ISD = ISD::FMA;
1990 break;
1991 case Intrinsic::fmuladd:
1992 ISD = ISD::FMA;
1993 break;
1994 case Intrinsic::experimental_constrained_fmuladd:
1995 ISD = ISD::STRICT_FMA;
1996 break;
1997 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
1998 case Intrinsic::lifetime_start:
1999 case Intrinsic::lifetime_end:
2000 case Intrinsic::sideeffect:
2001 case Intrinsic::pseudoprobe:
2002 case Intrinsic::arithmetic_fence:
2003 return 0;
2004 case Intrinsic::masked_store: {
2005 Type *Ty = Tys[0];
2006 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2007 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
2008 CostKind);
2009 }
2010 case Intrinsic::masked_load: {
2011 Type *Ty = RetTy;
2012 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2013 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
2014 CostKind);
2015 }
2016 case Intrinsic::vector_reduce_add:
2017 case Intrinsic::vector_reduce_mul:
2018 case Intrinsic::vector_reduce_and:
2019 case Intrinsic::vector_reduce_or:
2020 case Intrinsic::vector_reduce_xor:
2021 return thisT()->getArithmeticReductionCost(
2022 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2023 CostKind);
2024 case Intrinsic::vector_reduce_fadd:
2025 case Intrinsic::vector_reduce_fmul:
2026 return thisT()->getArithmeticReductionCost(
2027 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2028 case Intrinsic::vector_reduce_smax:
2029 case Intrinsic::vector_reduce_smin:
2030 case Intrinsic::vector_reduce_umax:
2031 case Intrinsic::vector_reduce_umin:
2032 case Intrinsic::vector_reduce_fmax:
2033 case Intrinsic::vector_reduce_fmin:
2034 case Intrinsic::vector_reduce_fmaximum:
2035 case Intrinsic::vector_reduce_fminimum:
2036 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2037 VecOpTy, ICA.getFlags(), CostKind);
2038 case Intrinsic::abs: {
2039 // abs(X) = select(icmp(X,0),X,sub(0,X))
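 // Worked example (illustrative): for i32, abs(-7) is modeled as
 // icmp sgt -7, 0 (false) followed by select(false, -7, sub(0, -7)) = 7,
 // i.e. one compare, one select and one subtract.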
2040 Type *CondTy = RetTy->getWithNewBitWidth(1);
2041 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2042 InstructionCost Cost = 0;
2043 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2044 Pred, CostKind);
2045 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2046 Pred, CostKind);
2047 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2048 Cost += thisT()->getArithmeticInstrCost(
2049 BinaryOperator::Sub, RetTy, CostKind, {TTI::OK_UniformConstantValue, TTI::OP_None});
2050 return Cost;
2051 }
2052 case Intrinsic::smax:
2053 case Intrinsic::smin:
2054 case Intrinsic::umax:
2055 case Intrinsic::umin: {
2056 // minmax(X,Y) = select(icmp(X,Y),X,Y)
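 // Worked example (illustrative): umax(%a, %b) is modeled as
 //   %c = icmp ugt %a, %b ; %r = select %c, %a, %b
 // so the cost is one compare plus one select of the result type.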
2057 Type *CondTy = RetTy->getWithNewBitWidth(1);
2058 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2059 CmpInst::Predicate Pred =
2060 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2061 InstructionCost Cost = 0;
2062 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2063 Pred, CostKind);
2064 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2065 Pred, CostKind);
2066 return Cost;
2067 }
2068 case Intrinsic::sadd_sat:
2069 case Intrinsic::ssub_sat: {
2070 Type *CondTy = RetTy->getWithNewBitWidth(1);
2071
2072 Type *OpTy = StructType::create({RetTy, CondTy});
2073 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2074 ? Intrinsic::sadd_with_overflow
2075 : Intrinsic::ssub_with_overflow;
2076 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2077 
2078 // SatMax -> Overflow && SumDiff < 0
2079 // SatMin -> Overflow && SumDiff >= 0
2080 InstructionCost Cost = 0;
2081 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2082 nullptr, ScalarizationCostPassed);
2083 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2084 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2085 Pred, CostKind);
2086 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2087 CondTy, Pred, CostKind);
2088 return Cost;
2089 }
2090 case Intrinsic::uadd_sat:
2091 case Intrinsic::usub_sat: {
2092 Type *CondTy = RetTy->getWithNewBitWidth(1);
2093
2094 Type *OpTy = StructType::create({RetTy, CondTy});
2095 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2096 ? Intrinsic::uadd_with_overflow
2097 : Intrinsic::usub_with_overflow;
2098
2099 InstructionCost Cost = 0;
2100 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2101 nullptr, ScalarizationCostPassed);
2102 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2103 Cost +=
2104 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2105 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2106 return Cost;
2107 }
2108 case Intrinsic::smul_fix:
2109 case Intrinsic::umul_fix: {
2110 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2111 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2112
2113 unsigned ExtOp =
2114 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2115 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2116 
2117 InstructionCost Cost = 0;
2118 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2119 Cost +=
2120 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2121 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2122 CCH, CostKind);
2123 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
2124 CostKind,
2125 {TTI::OK_AnyValue, TTI::OP_None},
2126 {TTI::OK_UniformConstantValue, TTI::OP_None});
2127 Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
2128 {TTI::OK_AnyValue, TTI::OP_None},
2129 {TTI::OK_UniformConstantValue, TTI::OP_None});
2130 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2131 return Cost;
2132 }
2133 case Intrinsic::sadd_with_overflow:
2134 case Intrinsic::ssub_with_overflow: {
2135 Type *SumTy = RetTy->getContainedType(0);
2136 Type *OverflowTy = RetTy->getContainedType(1);
2137 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2138 ? BinaryOperator::Add
2139 : BinaryOperator::Sub;
2140
2141 // Add:
2142 // Overflow -> (Result < LHS) ^ (RHS < 0)
2143 // Sub:
2144 // Overflow -> (Result < LHS) ^ (RHS > 0)
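 // Worked example (illustrative): for i8 sadd.with.overflow(100, 100) the
 // wrapped sum is -56; (-56 < 100) is true and (100 < 0) is false, so the
 // xor yields true and overflow is reported. The cost below charges the
 // add/sub, two compares and the xor.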
2145 InstructionCost Cost = 0;
2146 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2147 Cost += 2 * thisT()->getCmpSelInstrCost(
2148 Instruction::ICmp, SumTy, OverflowTy,
2149 CmpInst::ICMP_SGT, CostKind);
2150 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2151 CostKind);
2152 return Cost;
2153 }
2154 case Intrinsic::uadd_with_overflow:
2155 case Intrinsic::usub_with_overflow: {
2156 Type *SumTy = RetTy->getContainedType(0);
2157 Type *OverflowTy = RetTy->getContainedType(1);
2158 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2159 ? BinaryOperator::Add
2160 : BinaryOperator::Sub;
2161 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2162 ? CmpInst::ICMP_ULT
2163 : CmpInst::ICMP_UGT;
2164 
2165 InstructionCost Cost = 0;
2166 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2167 Cost +=
2168 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
2169 Pred, CostKind);
2170 return Cost;
2171 }
2172 case Intrinsic::smul_with_overflow:
2173 case Intrinsic::umul_with_overflow: {
2174 Type *MulTy = RetTy->getContainedType(0);
2175 Type *OverflowTy = RetTy->getContainedType(1);
2176 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2177 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2178 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2179
2180 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2181 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2182 
2183 InstructionCost Cost = 0;
2184 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2185 Cost +=
2186 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2187 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2188 CCH, CostKind);
2189 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
2190 CostKind,
2191 {TTI::OK_AnyValue, TTI::OP_None},
2192 {TTI::OK_UniformConstantValue, TTI::OP_None});
2193 
2194 if (IsSigned)
2195 Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
2196 CostKind,
2197 {TTI::OK_AnyValue, TTI::OP_None},
2198 {TTI::OK_UniformConstantValue, TTI::OP_None});
2199 
2200 Cost += thisT()->getCmpSelInstrCost(
2201 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2202 return Cost;
2203 }
2204 case Intrinsic::fptosi_sat:
2205 case Intrinsic::fptoui_sat: {
2206 if (Tys.empty())
2207 break;
2208 Type *FromTy = Tys[0];
2209 bool IsSigned = IID == Intrinsic::fptosi_sat;
2210 
2211 InstructionCost Cost = 0;
2212 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2213 {FromTy, FromTy});
2214 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2215 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2216 {FromTy, FromTy});
2217 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2218 Cost += thisT()->getCastInstrCost(
2219 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2220 TTI::CastContextHint::None, CostKind);
2221 if (IsSigned) {
2222 Type *CondTy = RetTy->getWithNewBitWidth(1);
2223 Cost += thisT()->getCmpSelInstrCost(
2224 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2225 Cost += thisT()->getCmpSelInstrCost(
2226 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2227 }
2228 return Cost;
2229 }
2230 case Intrinsic::ctpop:
2231 ISD = ISD::CTPOP;
2232 // In case of legalization use TCC_Expensive. This is cheaper than a
2233 // library call but still not a cheap instruction.
2234 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2235 break;
2236 case Intrinsic::ctlz:
2237 ISD = ISD::CTLZ;
2238 break;
2239 case Intrinsic::cttz:
2240 ISD = ISD::CTTZ;
2241 break;
2242 case Intrinsic::bswap:
2243 ISD = ISD::BSWAP;
2244 break;
2245 case Intrinsic::bitreverse:
2246 ISD = ISD::BITREVERSE;
2247 break;
2248 }
2249
2250 const TargetLoweringBase *TLI = getTLI();
2251 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
2252
2253 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2254 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2255 TLI->isFAbsFree(LT.second)) {
2256 return 0;
2257 }
2258
2259 // The operation is legal. Assume it costs 1.
2260 // If the type is split to multiple registers, assume that there is some
2261 // overhead to this.
2262 // TODO: Once we have extract/insert subvector cost we need to use them.
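 // Illustrative example: if a <8 x float> sqrt is legalized into two
 // <4 x float> FSQRT operations, LT.first == 2 and the returned cost is 4.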
2263 if (LT.first > 1)
2264 return (LT.first * 2);
2265 else
2266 return (LT.first * 1);
2267 } else if (!TLI->isOperationExpand(ISD, LT.second)) {
2268 // If the operation is custom lowered then assume
2269 // that the code is twice as expensive.
2270 return (LT.first * 2);
2271 }
2272
2273 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2274 // point mul followed by an add.
2275 if (IID == Intrinsic::fmuladd)
2276 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2277 CostKind) +
2278 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2279 CostKind);
2280 if (IID == Intrinsic::experimental_constrained_fmuladd) {
2281 IntrinsicCostAttributes FMulAttrs(
2282 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2283 IntrinsicCostAttributes FAddAttrs(
2284 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2285 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2286 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2287 }
2288
2289 // Else, assume that we need to scalarize this intrinsic. For math builtins
2290 // this will emit a costly libcall, adding call overhead and spills. Make it
2291 // very expensive.
2292 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2293 // Scalable vectors cannot be scalarized, so return Invalid.
2294 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2295 return isa<ScalableVectorType>(Ty);
2296 }))
2297 return InstructionCost::getInvalid();
2298 
2299 InstructionCost ScalarizationCost =
2300 SkipScalarizationCost
2301 ? ScalarizationCostPassed
2302 : getScalarizationOverhead(RetVTy, /*Insert*/ true,
2303 /*Extract*/ false, CostKind);
2304
2305 unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
2306 SmallVector<Type *, 4> ScalarTys;
2307 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2308 Type *Ty = Tys[i];
2309 if (Ty->isVectorTy())
2310 Ty = Ty->getScalarType();
2311 ScalarTys.push_back(Ty);
2312 }
2313 IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
2314 InstructionCost ScalarCost =
2315 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2316 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2317 if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
2318 if (!ICA.skipScalarizationCost())
2319 ScalarizationCost += getScalarizationOverhead(
2320 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2321 ScalarCalls = std::max(ScalarCalls,
2322 cast<FixedVectorType>(VTy)->getNumElements());
2323 }
2324 }
2325 return ScalarCalls * ScalarCost + ScalarizationCost;
2326 }
2327
2328 // This is going to be turned into a library call, make it expensive.
2329 return SingleCallCost;
2330 }
2331
2332 /// Compute a cost of the given call instruction.
2333 ///
2334 /// Compute the cost of calling function F with return type RetTy and
2335 /// argument types Tys. F might be nullptr, in this case the cost of an
2336 /// arbitrary call with the specified signature will be returned.
2337 /// This is used, for instance, when we estimate call of a vector
2338 /// counterpart of the given function.
2339 /// \param F Called function, might be nullptr.
2340 /// \param RetTy Return value types.
2341 /// \param Tys Argument types.
2342 /// \returns The cost of Call instruction.
2343 InstructionCost getCallInstrCost(Function *F, Type *RetTy,
2344 ArrayRef<Type *> Tys,
2345 TTI::TargetCostKind CostKind) {
2346 return 10;
2347 }
2348
2349 unsigned getNumberOfParts(Type *Tp) {
2350 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
2351 return LT.first.isValid() ? *LT.first.getValue() : 0;
2352 }
2353
2354 InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
2355 const SCEV *) {
2356 return 0;
2357 }
2358
2359 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
2360 /// We're assuming that reduction operations are performed in the following way:
2361 ///
2362 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
2363 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
2364 /// \----------------v-------------/ \----------v------------/
2365 /// n/2 elements n/2 elements
2366 /// %red1 = op <n x t> %val, <n x t> val1
2367 /// After this operation we have a vector %red1 where only the first n/2
2368 /// elements are meaningful, the second n/2 elements are undefined and can be
2369 /// dropped. All other operations are actually working with the vector of
2370 /// length n/2, not n, though the real vector length is still n.
2371 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
2372 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
2373 /// \----------------v-------------/ \----------v------------/
2374 /// n/4 elements 3*n/4 elements
2375 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
2376 /// length n/2, the resulting vector has length n/4 etc.
2377 ///
2378 /// The cost model should take into account that the actual length of the
2379 /// vector is reduced on each iteration.
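 ///
 /// For example, reducing <8 x i32> that fits in a single register takes
 /// log2(8) = 3 shuffle + op levels plus a final extract of element 0.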
2380 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
2381 TTI::TargetCostKind CostKind) {
2382 // Targets must implement a default value for the scalable case, since
2383 // we don't know how many lanes the vector has.
2384 if (isa<ScalableVectorType>(Ty))
2385 return InstructionCost::getInvalid();
2386 
2387 Type *ScalarTy = Ty->getElementType();
2388 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2389 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
2390 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
2391 NumVecElts >= 2) {
2392 // Or reduction for i1 is represented as:
2393 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2394 // %res = cmp ne iReduxWidth %val, 0
2395 // And reduction for i1 is represented as:
2396 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2397 // %res = cmp eq iReduxWidth %val, -1 (i.e. all bits set)
2398 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
2399 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
2400 TTI::CastContextHint::None, CostKind) +
2401 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
2402 CmpInst::makeCmpResultType(ValTy),
2403 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2404 }
2405 unsigned NumReduxLevels = Log2_32(NumVecElts);
2406 InstructionCost ArithCost = 0;
2407 InstructionCost ShuffleCost = 0;
2408 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2409 unsigned LongVectorCount = 0;
2410 unsigned MVTLen =
2411 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2412 while (NumVecElts > MVTLen) {
2413 NumVecElts /= 2;
2414 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2415 ShuffleCost +=
2416 thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
2417 CostKind, NumVecElts, SubTy);
2418 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
2419 Ty = SubTy;
2420 ++LongVectorCount;
2421 }
2422
2423 NumReduxLevels -= LongVectorCount;
2424
2425 // The minimal length of the vector is limited by the real length of vector
2426 // operations performed on the current platform. That's why several final
2427 // reduction operations are performed on the vectors with the same
2428 // architecture-dependent length.
2429
2430 // By default reductions need one shuffle per reduction level.
2431 ShuffleCost +=
2432 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
2433 std::nullopt, CostKind, 0, Ty);
2434 ArithCost +=
2435 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
2436 return ShuffleCost + ArithCost +
2437 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2438 CostKind, 0, nullptr, nullptr);
2439 }
2440
2441 /// Try to calculate the cost of performing strict (in-order) reductions,
2442 /// which involves doing a sequence of floating point additions in lane
2443 /// order, starting with an initial value. For example, consider a scalar
2444 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
2445 ///
2446 /// Vector = <float %v0, float %v1, float %v2, float %v3>
2447 ///
2448 /// %add1 = %InitVal + %v0
2449 /// %add2 = %add1 + %v1
2450 /// %add3 = %add2 + %v2
2451 /// %add4 = %add3 + %v3
2452 ///
2453 /// As a simple estimate we can say the cost of such a reduction is 4 times
2454 /// the cost of a scalar FP addition. We can only estimate the costs for
2455 /// fixed-width vectors here because for scalable vectors we do not know the
2456 /// runtime number of operations.
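 ///
 /// For the <4 x float> example above this costs four lane extracts plus
 /// four scalar floating point additions.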
2457 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
2458 TTI::TargetCostKind CostKind) {
2459 // Targets must implement a default value for the scalable case, since
2460 // we don't know how many lanes the vector has.
2461 if (isa<ScalableVectorType>(Ty))
2462 return InstructionCost::getInvalid();
2463 
2464 auto *VTy = cast<FixedVectorType>(Ty);
2465 InstructionCost ExtractCost = getScalarizationOverhead(
2466 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
2467 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
2468 Opcode, VTy->getElementType(), CostKind);
2469 ArithCost *= VTy->getNumElements();
2470
2471 return ExtractCost + ArithCost;
2472 }
2473
2474 InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2475 std::optional<FastMathFlags> FMF,
2476 TTI::TargetCostKind CostKind) {
2477 assert(Ty && "Unknown reduction vector type");
2478 if (TTI::requiresOrderedReduction(FMF))
2479 return getOrderedReductionCost(Opcode, Ty, CostKind);
2480 return getTreeReductionCost(Opcode, Ty, CostKind);
2481 }
2482
2483 /// Try to calculate op costs for min/max reduction operations.
2484 /// \param CondTy Conditional type for the Select instruction.
2485 InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
2486 FastMathFlags FMF,
2487 TTI::TargetCostKind CostKind) {
2488 // Targets must implement a default value for the scalable case, since
2489 // we don't know how many lanes the vector has.
2490 if (isa<ScalableVectorType>(Ty))
2491 return InstructionCost::getInvalid();
2492 
2493 Type *ScalarTy = Ty->getElementType();
2494 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2495 unsigned NumReduxLevels = Log2_32(NumVecElts);
2496 InstructionCost MinMaxCost = 0;
2497 InstructionCost ShuffleCost = 0;
2498 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2499 unsigned LongVectorCount = 0;
2500 unsigned MVTLen =
2501 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2502 while (NumVecElts > MVTLen) {
2503 NumVecElts /= 2;
2504 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2505
2506 ShuffleCost +=
2507 thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
2508 CostKind, NumVecElts, SubTy);
2509
2510 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
2511 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
2512 Ty = SubTy;
2513 ++LongVectorCount;
2514 }
2515
2516 NumReduxLevels -= LongVectorCount;
2517
2518 // The minimal length of the vector is limited by the real length of vector
2519 // operations performed on the current platform. That's why several final
2520 // reduction operations are performed on the vectors with the same
2521 // architecture-dependent length.
2522 ShuffleCost +=
2523 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
2524 std::nullopt, CostKind, 0, Ty);
2525 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
2526 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
2527 // The last min/max should be in vector registers and we counted it above.
2528 // So just need a single extractelement.
2529 return ShuffleCost + MinMaxCost +
2530 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2531 CostKind, 0, nullptr, nullptr);
2532 }
2533
2534 InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
2535 Type *ResTy, VectorType *Ty,
2536 FastMathFlags FMF,
2537 TTI::TargetCostKind CostKind) {
2538 // Without any native support, this is equivalent to the cost of
2539 // vecreduce.opcode(ext(Ty A)).
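 // Illustrative example: an unsigned add reduction from <8 x i16> to i32 is
 // costed as a zext of <8 x i16> to <8 x i32> plus a vecreduce.add over
 // <8 x i32>.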
2540 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2541 InstructionCost RedCost =
2542 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
2543 InstructionCost ExtCost = thisT()->getCastInstrCost(
2544 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2545 TTI::CastContextHint::None, CostKind);
2546 
2547 return RedCost + ExtCost;
2548 }
2549
2550 InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
2551 VectorType *Ty,
2552 TTI::TargetCostKind CostKind) {
2553 // Without any native support, this is equivalent to the cost of
2554 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
2555 // vecreduce.add(mul(A, B)).
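 // Illustrative example: with ResTy == i32 and Ty == <8 x i16> this charges
 // two zext (or sext) casts to <8 x i32>, one <8 x i32> mul, and a
 // vecreduce.add over <8 x i32>.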
2556 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2557 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
2558 Instruction::Add, ExtTy, std::nullopt, CostKind);
2559 InstructionCost ExtCost = thisT()->getCastInstrCost(
2560 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2561 TTI::CastContextHint::None, CostKind);
2562 
2563 InstructionCost MulCost =
2564 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2565
2566 return RedCost + MulCost + 2 * ExtCost;
2567 }
2568
2568 
2569 InstructionCost getVectorSplitCost() { return 1; }
2570 
2571 /// @}
2572};
2573
2574/// Concrete BasicTTIImpl that can be used if no further customization
2575/// is needed.
2576class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
2577 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
2578 
2579 friend class BasicTTIImplBase<BasicTTIImpl>;
2580
2581 const TargetSubtargetInfo *ST;
2582 const TargetLoweringBase *TLI;
2583
2584 const TargetSubtargetInfo *getST() const { return ST; }
2585 const TargetLoweringBase *getTLI() const { return TLI; }
2586
2587public:
2588 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
2589};
2590
2591} // end namespace llvm
2592
2593#endif // LLVM_CODEGEN_BASICTTIIMPL_H
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
return RetTy
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
This pass exposes codegen information to IR-level passes.
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1308
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1179
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1439
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1108
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
an instruction to allocate memory on the stack
Definition: Instructions.h:59
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:210
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
Base class which can be used to help build a TTI implementation.
Definition: BasicTTIImpl.h:80
bool isTypeLegal(Type *Ty)
Definition: BasicTTIImpl.h:422
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:286
virtual unsigned getPrefetchDistance() const
Definition: BasicTTIImpl.h:716
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
Definition: BasicTTIImpl.h:399
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
Definition: BasicTTIImpl.h:576
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
Definition: BasicTTIImpl.h:549
unsigned getMaxInterleaveFactor(ElementCount VF)
Definition: BasicTTIImpl.h:883
unsigned getNumberOfParts(Type *Tp)
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
Definition: BasicTTIImpl.h:745
std::optional< unsigned > getVScaleForTuning() const
Definition: BasicTTIImpl.h:750
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTruncateFree(Type *Ty1, Type *Ty2)
Definition: BasicTTIImpl.h:412
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
Definition: BasicTTIImpl.h:656
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
Definition: BasicTTIImpl.h:885
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI)
Definition: BasicTTIImpl.h:663
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
Definition: BasicTTIImpl.h:736
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I)
bool isLegalICmpImmediate(int64_t imm)
Definition: BasicTTIImpl.h:331
bool isProfitableToHoist(Instruction *I)
Definition: BasicTTIImpl.h:416
virtual unsigned getMaxPrefetchIterationsAhead() const
Definition: BasicTTIImpl.h:728
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index)
std::optional< unsigned > getMaxVScale() const
Definition: BasicTTIImpl.h:749
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const
Definition: BasicTTIImpl.h:963
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
unsigned getRegUsageForType(Type *Ty)
Definition: BasicTTIImpl.h:427
bool shouldBuildRelLookupTables() const
Definition: BasicTTIImpl.h:503
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
Definition: BasicTTIImpl.h:570
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt)
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Definition: BasicTTIImpl.h:438
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:370
bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2)
Definition: BasicTTIImpl.h:382
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
Definition: BasicTTIImpl.h:678
bool shouldFoldTerminatingConditionAfterLSR() const
Definition: BasicTTIImpl.h:390
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Definition: BasicTTIImpl.h:720
bool hasBranchDivergence(const Function *F=nullptr)
Definition: BasicTTIImpl.h:280
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:376
unsigned getAssumedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:308
InstructionCost getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instructions unique non-constant operands.
Definition: BasicTTIImpl.h:802
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *, const SCEV *)
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instruction.
Definition: BasicTTIImpl.h:756
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
Definition: BasicTTIImpl.h:346
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
Definition: BasicTTIImpl.h:432
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty)
Definition: BasicTTIImpl.h:535
virtual std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:696
bool isAlwaysUniform(const Value *V)
Definition: BasicTTIImpl.h:284
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr)
Definition: BasicTTIImpl.h:335
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true)
Definition: BasicTTIImpl.h:668
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const
Definition: BasicTTIImpl.h:272
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const
Definition: BasicTTIImpl.h:350
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
Definition: BasicTTIImpl.h:786
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:702
virtual bool enableWritePrefetching() const
Definition: BasicTTIImpl.h:732
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Definition: BasicTTIImpl.h:322
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
Definition: BasicTTIImpl.h:648
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Definition: BasicTTIImpl.h:299
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Compute a cost of the given call instruction.
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
InstructionCost getFPOpCost(Type *Ty)
Definition: BasicTTIImpl.h:539
InstructionCost getVectorSplitCost()
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
Definition: BasicTTIImpl.h:849
bool haveFastSqrt(Type *Ty)
Definition: BasicTTIImpl.h:528
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:318
unsigned getInliningThresholdMultiplier() const
Definition: BasicTTIImpl.h:568
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind)
virtual ~BasicTTIImplBase()=default
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
Definition: BasicTTIImpl.h:831
bool isVScaleKnownToBeAPowerOfTwo() const
Definition: BasicTTIImpl.h:751
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II)
Definition: BasicTTIImpl.h:672
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Definition: BasicTTIImpl.h:290
bool isLegalAddImmediate(int64_t imm)
Definition: BasicTTIImpl.h:327
unsigned getFlatAddressSpace()
Definition: BasicTTIImpl.h:294
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
virtual unsigned getCacheLineSize() const
Definition: BasicTTIImpl.h:712
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:304
bool isSourceOfDivergence(const Value *V)
Definition: BasicTTIImpl.h:282
int getInlinerVectorBonusPercent() const
Definition: BasicTTIImpl.h:574
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on argument types.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Definition: BasicTTIImpl.h:685
bool isSingleThreaded() const
Definition: BasicTTIImpl.h:312
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
Definition: BasicTTIImpl.h:263
unsigned adjustInliningThreshold(const CallBase *CB)
Definition: BasicTTIImpl.h:569
bool isProfitableLSRChainElement(Instruction *I)
Definition: BasicTTIImpl.h:395
Concrete BasicTTIImpl that can be used if no further customization is needed.
size_type count() const
count - Returns the number of bits which are set.
Definition: BitVector.h:162
BitVector & set()
Definition: BitVector.h:351
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1455
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1323
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:965
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:988
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:992
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:990
@ ICMP_EQ
equal
Definition: InstrTypes.h:986
@ ICMP_NE
not equal
Definition: InstrTypes.h:987
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:975
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
Definition: DataLayout.h:484
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:420
constexpr bool isVector() const
One or more elements.
Definition: TypeSize.h:311
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:307
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
bool isTargetIntrinsic() const
isTargetIntrinsic - Returns true if this function is an intrinsic and the intrinsic is specific to a ...
Definition: Function.cpp:877
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:338
The core instruction combiner logic.
Definition: InstCombiner.h:47
static InstructionCost getInvalid(CostType Val=0)
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:251
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
const SmallVectorImpl< Type * > & getArgTypes() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Return the minimum stride necessary to trigger software prefetching.
virtual bool enableWritePrefetching() const
virtual unsigned getMaxPrefetchIterationsAhead() const
Return the maximum prefetch distance in terms of loop iterations.
virtual unsigned getPrefetchDistance() const
Return the preferred prefetch distance in terms of instructions.
virtual std::optional< unsigned > getCacheAssociativity(unsigned Level) const
Return the cache associatvity for the given level of cache.
virtual std::optional< unsigned > getCacheLineSize(unsigned Level) const
Return the target cache line size in bytes at a given level.
Machine Value Type.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
size_type size() const
Definition: SmallPtrSet.h:94
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:513
Multiway switch.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool isProfitableToHoist(Instruction *I) const
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the prefered common base offset.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
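A minimal sketch of an addressing-mode legality query, assuming TLI, a DataLayout DL, and an LLVMContext Ctx are in scope (illustrative names and constants only):
// Sketch only: ask whether "base_reg + 4*index_reg + 16" is supported for an
// i32 access in address space 0.
TargetLoweringBase::AddrMode AM;
AM.HasBaseReg = true;
AM.BaseOffs = 16;
AM.Scale = 4;
bool Legal = TLI.isLegalAddressingMode(DL, AM, Type::getInt32Ty(Ctx),
                                       /*AddrSpace=*/0);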
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
virtual std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
If the specified predicate checks whether a generic pointer falls within a specified address space,...
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
virtual unsigned getAssumedAddrSpace(const Value *V) const
If the specified generic pointer could be assumed as a pointer to a specific address space,...
TargetOptions Options
ThreadModel::Model ThreadModel
ThreadModel - This flag specifies the type of threading model to assume for things like atomics.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine,...
const DataLayout & getDataLayout() const
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const
bool isProfitableLSRChainElement(Instruction *I) const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I) const
bool isLoweredToCall(const Function *F) const
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
static OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
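A minimal sketch of how the cost kinds above are used, assuming a TargetTransformInfo reference TTI and an Instruction *I are in scope (illustrative names only):
// Sketch only: the same instruction can be costed for throughput or for size.
InstructionCost Throughput =
    TTI.getInstructionCost(I, TargetTransformInfo::TCK_RecipThroughput);
InstructionCost Size =
    TTI.getInstructionCost(I, TargetTransformInfo::TCK_CodeSize);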
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
@ TCC_Expensive
The cost of a 'div' instruction on x86.
@ TCC_Basic
The cost of a typical 'add' instruction.
MemIndexedMode
The type of load/store indexing.
@ MIM_PostInc
Post-incrementing.
@ MIM_PostDec
Post-decrementing.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector. Index indicates start offset.
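A minimal sketch of querying shuffle costs for two of the kinds above, assuming TTI (TargetTransformInfo) and Ctx (LLVMContext) are in scope (illustrative names only):
// Sketch only: cost of broadcasting lane 0 vs. reversing a <4 x float> vector.
auto *V4F32 = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
InstructionCost BroadcastCost =
    TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, V4F32);
InstructionCost ReverseCost =
    TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, V4F32);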
CastContextHint
Represents a hint about the context in which a cast is used.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
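A minimal sketch of how the hint changes a cast-cost query, assuming TTI and Ctx as above (illustrative names only):
// Sketch only: a sext whose source comes straight from a load (hint Normal) is
// often folded into an extending load and is cheaper than a standalone sext.
InstructionCost Standalone = TTI.getCastInstrCost(
    Instruction::SExt, Type::getInt32Ty(Ctx), Type::getInt8Ty(Ctx),
    TargetTransformInfo::CastContextHint::None);
InstructionCost FedByLoad = TTI.getCastInstrCost(
    Instruction::SExt, Type::getInt32Ty(Ctx), Type::getInt8Ty(Ctx),
    TargetTransformInfo::CastContextHint::Normal);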
CacheLevel
The possible cache levels.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:361
bool isArch64Bit() const
Test whether the architecture is 64-bit.
Definition: Triple.cpp:1538
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Definition: Triple.h:542
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:330
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
Value * getOperand(unsigned i) const
Definition: User.h:169
static bool isVPBinOp(Intrinsic::ID ID)
static std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static bool isVPIntrinsic(Intrinsic::ID)
static bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
Definition: DerivedTypes.h:507
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Definition: Type.cpp:676
Type * getElementType() const
Definition: DerivedTypes.h:436
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
Definition: APInt.cpp:3011
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:714
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:483
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1052
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1056
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition: ISDOpcodes.h:500
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:971
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:736
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:984
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:493
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1472
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
Definition: LoopUtils.cpp:950
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:417
AddressSpace
Definition: NVPTXBaseInfo.h:21
unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)
Returns the arithmetic instruction opcode used when expanding a reduction.
Definition: LoopUtils.cpp:921
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
InstructionCost Cost
cl::opt< unsigned > PartialUnrollingThreshold
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:628
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
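A minimal sketch of the EVT helpers above, assuming Ty is a Type * and Ctx an LLVMContext (illustrative names only):
// Sketch only: map an IR type to an EVT and check whether it is "simple".
EVT VT = EVT::getEVT(Ty);
if (VT.isSimple()) {
  MVT SimpleVT = VT.getSimpleVT(); // only valid when isSimple() is true
  (void)SimpleVT;
}
EVT I40 = EVT::getIntegerVT(Ctx, 40); // an extended (non-simple) integer EVT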
Attributes of a target dependent hardware loop.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
bool AllowPeeling
Allow peeling off loop iterations.
bool AllowLoopNestsPeeling
Allow peeling off loop iterations for loop nests.
bool PeelProfiledIterations
Allow peeling basing on profile.
unsigned PeelCount
A forced peeling factor (the number of bodies of the original loop that should be peeled off before t...
Parameters that control the generic loop unrolling transformation.
bool UpperBound
Allow using trip count upper bound to unroll loops.
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
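A minimal sketch of how a target's TTI implementation might tune the unrolling parameters above; MyTTIImpl, BaseT, and the chosen values are hypothetical and not taken from any in-tree target.
// Sketch only: hypothetical target hook adjusting the fields documented above.
void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                        TTI::UnrollingPreferences &UP,
                                        OptimizationRemarkEmitter *ORE) {
  BaseT::getUnrollingPreferences(L, SE, UP, ORE); // start from generic defaults
  UP.Partial = true;          // allow partial unrolling
  UP.Runtime = true;          // allow runtime unrolling
  UP.PartialThreshold = 200;  // hypothetical size budget for partial unrolling
}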