1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
54#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <limits>
58#include <optional>
59#include <utility>
60
61namespace llvm {
62
63class Function;
64class GlobalValue;
65class LLVMContext;
66class ScalarEvolution;
67class SCEV;
68class TargetMachine;
69
70extern cl::opt<unsigned> PartialUnrollingThreshold;
71
72/// Base class which can be used to help build a TTI implementation.
73///
74/// This class provides as much implementation of the TTI interface as is
75/// possible using the target independent parts of the code generator.
76///
77/// In order to subclass it, your class must implement a getST() method to
78/// return the subtarget, and a getTLI() method to return the target lowering.
79/// We need these methods implemented in the derived class so that this class
80/// doesn't have to duplicate storage for them.
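/// A minimal sketch of a typical subclass (illustrative only; "MyTTIImpl",
/// "MySubtarget", "MyTargetLowering" and "MyTargetMachine" are placeholder
/// names, not types defined in LLVM):
/// \code
///   class MyTTIImpl final : public BasicTTIImplBase<MyTTIImpl> {
///     using BaseT = BasicTTIImplBase<MyTTIImpl>;
///     friend BaseT;
///
///     const MySubtarget *ST;
///     const MyTargetLowering *TLI;
///
///     const MySubtarget *getST() const { return ST; }
///     const MyTargetLowering *getTLI() const { return TLI; }
///
///   public:
///     explicit MyTTIImpl(const MyTargetMachine *TM, const Function &F)
///         : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
///           TLI(ST->getTargetLowering()) {}
///   };
/// \endcode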
81template <typename T>
82class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
83private:
84 using BaseT = TargetTransformInfoImplCRTPBase<T>;
85 using TTI = TargetTransformInfo;
86
87 /// Helper function to access this as a T.
88 const T *thisT() const { return static_cast<const T *>(this); }
89
90 /// Estimate a cost of Broadcast as an extract and sequence of insert
91 /// operations.
92 InstructionCost
93 getBroadcastShuffleOverhead(FixedVectorType *VTy,
94 TTI::TargetCostKind CostKind) const {
95 InstructionCost Cost = 0;
96 // Broadcast cost is equal to the cost of extracting the zero'th element
97 // plus the cost of inserting it into every element of the result vector.
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
99 CostKind, 0, nullptr, nullptr);
100
101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
103 CostKind, i, nullptr, nullptr);
104 }
105 return Cost;
106 }
107
108 /// Estimate a cost of shuffle as a sequence of extract and insert
109 /// operations.
110 InstructionCost
111 getPermuteShuffleOverhead(FixedVectorType *VTy,
112 TTI::TargetCostKind CostKind) const {
113 InstructionCost Cost = 0;
114 // Shuffle cost is equal to the cost of extracting element from its argument
115 // plus the cost of inserting them onto the result vector.
116
117 // e.g. <4 x float> has a mask of <0,5,2,7> i.e we need to extract from
118 // index 0 of first vector, index 1 of second vector,index 2 of first
119 // vector and finally index 3 of second vector and insert them at index
120 // <0,1,2,3> of result vector.
121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
123 CostKind, i, nullptr, nullptr);
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
125 CostKind, i, nullptr, nullptr);
126 }
127 return Cost;
128 }
129
130 /// Estimate a cost of subvector extraction as a sequence of extract and
131 /// insert operations.
132 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
133 TTI::TargetCostKind CostKind,
134 int Index,
135 FixedVectorType *SubVTy) const {
136 assert(VTy && SubVTy &&
137 "Can only extract subvectors from vectors");
138 int NumSubElts = SubVTy->getNumElements();
139 assert((!isa<FixedVectorType>(VTy) ||
140 (Index + NumSubElts) <=
141 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
142 "SK_ExtractSubvector index out of range");
143
144 InstructionCost Cost = 0;
145 // Subvector extraction cost is equal to the cost of extracting element from
146 // the source type plus the cost of inserting them into the result vector
147 // type.
148 for (int i = 0; i != NumSubElts; ++i) {
149 Cost +=
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
153 CostKind, i, nullptr, nullptr);
154 }
155 return Cost;
156 }
157
158 /// Estimate a cost of subvector insertion as a sequence of extract and
159 /// insert operations.
160 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
161 TTI::TargetCostKind CostKind,
162 int Index,
163 FixedVectorType *SubVTy) const {
164 assert(VTy && SubVTy &&
165 "Can only insert subvectors into vectors");
166 int NumSubElts = SubVTy->getNumElements();
167 assert((!isa<FixedVectorType>(VTy) ||
168 (Index + NumSubElts) <=
169 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
170 "SK_InsertSubvector index out of range");
171
172 InstructionCost Cost = 0;
173 // Subvector insertion cost is equal to the cost of extracting element from
174 // the source type plus the cost of inserting them into the result vector
175 // type.
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
178 CostKind, i, nullptr, nullptr);
179 Cost +=
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
182 }
183 return Cost;
184 }
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetSubtargetInfo *getST() const {
188 return static_cast<const T *>(this)->getST();
189 }
190
191 /// Local query method delegates up to T which *must* implement this!
192 const TargetLoweringBase *getTLI() const {
193 return static_cast<const T *>(this)->getTLI();
194 }
195
196 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
197 switch (M) {
198 case TTI::MIM_Unindexed:
199 return ISD::UNINDEXED;
200 case TTI::MIM_PreInc:
201 return ISD::PRE_INC;
202 case TTI::MIM_PreDec:
203 return ISD::PRE_DEC;
204 case TTI::MIM_PostInc:
205 return ISD::POST_INC;
206 case TTI::MIM_PostDec:
207 return ISD::POST_DEC;
208 }
209 llvm_unreachable("Unexpected MemIndexedMode");
210 }
211
212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
213 Align Alignment,
214 bool VariableMask,
215 bool IsGatherScatter,
216 TTI::TargetCostKind CostKind,
217 unsigned AddressSpace = 0) const {
218 // We cannot scalarize scalable vectors, so return Invalid.
219 if (isa<ScalableVectorType>(DataTy))
220 return InstructionCost::getInvalid();
221
222 auto *VT = cast<FixedVectorType>(DataTy);
223 unsigned VF = VT->getNumElements();
224
225 // Assume the target does not have support for gather/scatter operations
226 // and provide a rough estimate.
227 //
228 // First, compute the cost of the individual memory operations.
229 InstructionCost AddrExtractCost =
230 IsGatherScatter ? getScalarizationOverhead(
231 FixedVectorType::get(
232 PointerType::get(VT->getContext(), 0), VF),
233 /*Insert=*/false, /*Extract=*/true, CostKind)
234 : 0;
235
236 // The cost of the scalar loads/stores.
237 InstructionCost MemoryOpCost =
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
239 AddressSpace, CostKind);
240
241 // Next, compute the cost of packing the result in a vector.
242 InstructionCost PackingCost =
243 getScalarizationOverhead(VT, Opcode != Instruction::Store,
244 Opcode == Instruction::Store, CostKind);
245
246 InstructionCost ConditionalCost = 0;
247 if (VariableMask) {
248 // Compute the cost of conditionally executing the memory operations with
249 // variable masks. This includes extracting the individual conditions,
250 // branches, and PHIs to combine the results.
251 // NOTE: Estimating the cost of conditionally executing the memory
252 // operations accurately is quite difficult and the current solution
253 // provides a very rough estimate only.
254 ConditionalCost =
255 getScalarizationOverhead(
256 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()), VF),
257 /*Insert=*/false, /*Extract=*/true, CostKind) +
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
260 }
261
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
263 }
264
265 /// Checks if the provided mask \p Mask is a splat mask, i.e. it contains
266 /// only -1 or the same non -1 index value, and that value occurs at least twice.
267 /// So, mask <0, -1,-1, -1> is not considered splat (it is just identity),
268 /// same for <-1, 0, -1, -1> (just a slide), while <2, -1, 2, -1> is a splat
269 /// with \p Index=2.
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
271 // Check that the broadcast index meets at least twice.
272 bool IsCompared = false;
273 if (int SplatIdx = PoisonMaskElem;
274 all_of(enumerate(Mask), [&](const auto &P) {
275 if (P.value() == PoisonMaskElem)
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
278 return false;
279 if (SplatIdx == PoisonMaskElem) {
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
282 }
283 IsCompared = true;
284 return SplatIdx == P.value();
285 })) {
286 Index = SplatIdx;
287 return true;
288 }
289 return false;
290 }
291
292 /// Several intrinsics that return structs (including llvm.sincos[pi] and
293 /// llvm.modf) can be lowered to a vector library call (for certain VFs). The
294 /// vector library functions correspond to the scalar calls (e.g. sincos or
295 /// modf), which unlike the intrinsic return values via output pointers. This
296 /// helper checks if a vector call exists for the given intrinsic, and returns
297 /// the cost, which includes the cost of the mask (if required), and the loads
298 /// for values returned via output pointers. \p LC is the scalar libcall and
299 /// \p CallRetElementIndex (optional) is the struct element which is mapped to
300 /// the call return value. If std::nullopt is returned, then no vector library
301 /// call is available, so the intrinsic should be assigned the default cost
302 /// (e.g. scalarization).
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
304 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
305 std::optional<unsigned> CallRetElementIndex = {}) const {
306 Type *RetTy = ICA.getReturnType();
307 // Vector variants of the intrinsic can be mapped to a vector library call.
308 auto const *LibInfo = ICA.getLibInfo();
309 if (!LibInfo || !isa<StructType>(RetTy) ||
311 return std::nullopt;
312
313 Type *Ty = getContainedTypes(RetTy).front();
314 EVT VT = getTLI()->getValueType(DL, Ty);
315
316 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
317
318 switch (ICA.getID()) {
319 case Intrinsic::modf:
320 LC = RTLIB::getMODF(VT);
321 break;
322 case Intrinsic::sincospi:
323 LC = RTLIB::getSINCOSPI(VT);
324 break;
325 case Intrinsic::sincos:
326 LC = RTLIB::getSINCOS(VT);
327 break;
328 default:
329 return std::nullopt;
330 }
331
332 // Find associated libcall.
333 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
334 if (LibcallImpl == RTLIB::Unsupported)
335 return std::nullopt;
336
337 LLVMContext &Ctx = RetTy->getContext();
338
339 // Cost the call + mask.
340 auto Cost =
341 thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
342
345 auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
346 Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
347 VecTy, {}, CostKind, 0, nullptr, {});
348 }
349
350 // Lowering to a library call (with output pointers) may require us to emit
351 // reloads for the results.
352 for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
353 if (Idx == CallRetElementIndex)
354 continue;
355 Cost += thisT()->getMemoryOpCost(
356 Instruction::Load, VectorTy,
357 thisT()->getDataLayout().getABITypeAlign(VectorTy), 0, CostKind);
358 }
359 return Cost;
360 }
361
362 /// Filter out constant and duplicated entries in \p Ops and return a vector
363 /// containing the types from \p Tys corresponding to the remaining operands.
364 static SmallVector<Type *, 4>
365 filterConstantAndDuplicatedOperands(ArrayRef<const Value *> Ops,
366 ArrayRef<Type *> Tys) {
367 SmallPtrSet<const Value *, 4> UniqueOperands;
368 SmallVector<Type *, 4> FilteredTys;
369 for (const auto &[Op, Ty] : zip_equal(Ops, Tys)) {
370 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
371 continue;
372 FilteredTys.push_back(Ty);
373 }
374 return FilteredTys;
375 }
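  // Illustrative example (not from the source): for Ops = {%a, 7, %a, %b} with
  // Tys = {i32, i32, i32, i64}, the constant 7 and the duplicate %a are
  // dropped, so the returned type list is {i32, i64}.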
376
377protected:
378 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
379 : BaseT(DL) {}
380 ~BasicTTIImplBase() override = default;
381
382 using TargetTransformInfoImplBase::DL;
384
385public:
386 /// \name Scalar TTI Implementations
387 /// @{
388 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
389 unsigned AddressSpace, Align Alignment,
390 unsigned *Fast) const override {
391 EVT E = EVT::getIntegerVT(Context, BitWidth);
392 return getTLI()->allowsMisalignedMemoryAccesses(
393 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
394 }
395
396 bool areInlineCompatible(const Function *Caller,
397 const Function *Callee) const override {
398 const TargetMachine &TM = getTLI()->getTargetMachine();
399
400 const FeatureBitset &CallerBits =
401 TM.getSubtargetImpl(*Caller)->getFeatureBits();
402 const FeatureBitset &CalleeBits =
403 TM.getSubtargetImpl(*Callee)->getFeatureBits();
404
405 // Inline a callee if its target-features are a subset of the callers
406 // target-features.
407 return (CallerBits & CalleeBits) == CalleeBits;
408 }
409
410 bool hasBranchDivergence(const Function *F = nullptr) const override {
411 return false;
412 }
413
414 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
415 return false;
416 }
417
418 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
419 return true;
420 }
421
422 unsigned getFlatAddressSpace() const override {
423 // Return an invalid address space.
424 return -1;
425 }
426
427 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
428 Intrinsic::ID IID) const override {
429 return false;
430 }
431
432 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
433 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
434 }
435
436 unsigned getAssumedAddrSpace(const Value *V) const override {
437 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
438 }
439
440 bool isSingleThreaded() const override {
441 return getTLI()->getTargetMachine().Options.ThreadModel ==
442 ThreadModel::Single;
443 }
444
445 std::pair<const Value *, unsigned>
446 getPredicatedAddrSpace(const Value *V) const override {
447 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
448 }
449
450 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
451 Value *NewV) const override {
452 return nullptr;
453 }
454
455 bool isLegalAddImmediate(int64_t imm) const override {
456 return getTLI()->isLegalAddImmediate(imm);
457 }
458
459 bool isLegalAddScalableImmediate(int64_t Imm) const override {
460 return getTLI()->isLegalAddScalableImmediate(Imm);
461 }
462
463 bool isLegalICmpImmediate(int64_t imm) const override {
464 return getTLI()->isLegalICmpImmediate(imm);
465 }
466
467 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
468 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
469 Instruction *I = nullptr,
470 int64_t ScalableOffset = 0) const override {
471 TargetLoweringBase::AddrMode AM;
472 AM.BaseGV = BaseGV;
473 AM.BaseOffs = BaseOffset;
474 AM.HasBaseReg = HasBaseReg;
475 AM.Scale = Scale;
476 AM.ScalableOffset = ScalableOffset;
477 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
478 }
479
480 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
481 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
482 }
483
484 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
485 Type *ScalarValTy) const override {
486 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
487 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
488 EVT VT = getTLI()->getValueType(DL, SrcTy);
489 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
490 getTLI()->isOperationCustom(ISD::STORE, VT))
491 return true;
492
493 EVT ValVT =
494 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
495 EVT LegalizedVT =
496 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
497 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
498 };
499 while (VF > 2 && IsSupportedByTarget(VF))
500 VF /= 2;
501 return VF;
502 }
503
504 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
505 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
506 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
507 }
508
509 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
510 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
511 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
512 }
513
514 bool isLSRCostLess(const TTI::LSRCost &C1,
515 const TTI::LSRCost &C2) const override {
516 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
517 }
518
522
526
530
531 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
532 StackOffset BaseOffset, bool HasBaseReg,
533 int64_t Scale,
534 unsigned AddrSpace) const override {
536 AM.BaseGV = BaseGV;
537 AM.BaseOffs = BaseOffset.getFixed();
538 AM.HasBaseReg = HasBaseReg;
539 AM.Scale = Scale;
540 AM.ScalableOffset = BaseOffset.getScalable();
541 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
542 return 0;
543 return InstructionCost::getInvalid();
544 }
545
546 bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
547 return getTLI()->isTruncateFree(Ty1, Ty2);
548 }
549
550 bool isProfitableToHoist(Instruction *I) const override {
551 return getTLI()->isProfitableToHoist(I);
552 }
553
554 bool useAA() const override { return getST()->useAA(); }
555
556 bool isTypeLegal(Type *Ty) const override {
557 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
558 return getTLI()->isTypeLegal(VT);
559 }
560
561 unsigned getRegUsageForType(Type *Ty) const override {
562 EVT ETy = getTLI()->getValueType(DL, Ty);
563 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
564 }
565
566 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
567 ArrayRef<const Value *> Operands, Type *AccessType,
568 TTI::TargetCostKind CostKind) const override {
569 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
570 }
571
572 unsigned getEstimatedNumberOfCaseClusters(
573 const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI,
574 BlockFrequencyInfo *BFI) const override {
575 /// Try to find the estimated number of clusters. Note that the number of
576 /// clusters identified in this function could be different from the actual
577 /// numbers found in lowering. This function ignores switches that are
578 /// lowered with a mix of jump table / bit test / BTree. This function was
579 /// initially intended to be used when estimating the cost of switch in
580 /// inline cost heuristic, but it's a generic cost model to be used in other
581 /// places (e.g., in loop unrolling).
582 unsigned N = SI.getNumCases();
583 const TargetLoweringBase *TLI = getTLI();
584 const DataLayout &DL = this->getDataLayout();
585
586 JumpTableSize = 0;
587 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
588
589 // Early exit if both a jump table and bit test are not allowed.
590 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
591 return N;
592
593 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
594 APInt MinCaseVal = MaxCaseVal;
595 for (auto CI : SI.cases()) {
596 const APInt &CaseVal = CI.getCaseValue()->getValue();
597 if (CaseVal.sgt(MaxCaseVal))
598 MaxCaseVal = CaseVal;
599 if (CaseVal.slt(MinCaseVal))
600 MinCaseVal = CaseVal;
601 }
602
603 // Check if suitable for a bit test
604 if (N <= DL.getIndexSizeInBits(0u)) {
606 for (auto I : SI.cases()) {
607 const BasicBlock *BB = I.getCaseSuccessor();
608 ++DestMap[BB];
609 }
610
611 if (TLI->isSuitableForBitTests(DestMap, MinCaseVal, MaxCaseVal, DL))
612 return 1;
613 }
614
615 // Check if suitable for a jump table.
616 if (IsJTAllowed) {
617 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
618 return N;
619 uint64_t Range =
620 (MaxCaseVal - MinCaseVal)
621 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
622 // Check whether a range of clusters is dense enough for a jump table
623 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
624 JumpTableSize = Range;
625 return 1;
626 }
627 }
628 return N;
629 }
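  // Worked example (illustrative): a switch with the ten cases 0..9 where jump
  // tables are allowed gives Range = 9 - 0 + 1 = 10; if the target considers
  // that dense enough, this returns 1 cluster with JumpTableSize = 10,
  // otherwise it falls back to returning N = 10.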
630
631 bool shouldBuildLookupTables() const override {
632 const TargetLoweringBase *TLI = getTLI();
633 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
634 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
635 }
636
637 bool shouldBuildRelLookupTables() const override {
638 const TargetMachine &TM = getTLI()->getTargetMachine();
639 // If non-PIC mode, do not generate a relative lookup table.
640 if (!TM.isPositionIndependent())
641 return false;
642
643 /// Relative lookup table entries consist of 32-bit offsets.
644 /// Do not generate relative lookup tables for large code models
645 /// in 64-bit architectures where 32-bit offsets might not be enough.
646 if (TM.getCodeModel() == CodeModel::Medium ||
648 return false;
649
650 const Triple &TargetTriple = TM.getTargetTriple();
651 if (!TargetTriple.isArch64Bit())
652 return false;
653
654 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
655 // there.
656 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
657 return false;
658
659 return true;
660 }
661
662 bool haveFastSqrt(Type *Ty) const override {
663 const TargetLoweringBase *TLI = getTLI();
664 EVT VT = TLI->getValueType(DL, Ty);
665 return TLI->isTypeLegal(VT) &&
666 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
667 }
668
669 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override { return true; }
670
671 InstructionCost getFPOpCost(Type *Ty) const override {
672 // Check whether FADD is available, as a proxy for floating-point in
673 // general.
674 const TargetLoweringBase *TLI = getTLI();
675 EVT VT = TLI->getValueType(DL, Ty);
676 return TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT)
677 ? TargetTransformInfo::TCC_Basic
678 : TargetTransformInfo::TCC_Expensive;
679 }
680
681 bool preferToKeepConstantsAttached(const Instruction &Inst,
682 const Function &Fn) const override {
683 switch (Inst.getOpcode()) {
684 default:
685 break;
686 case Instruction::SDiv:
687 case Instruction::SRem:
688 case Instruction::UDiv:
689 case Instruction::URem: {
690 if (!isa<ConstantInt>(Inst.getOperand(1)))
691 return false;
692 EVT VT = getTLI()->getValueType(DL, Inst.getType());
693 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
694 }
695 };
696
697 return false;
698 }
699
700 unsigned getInliningThresholdMultiplier() const override { return 1; }
701 unsigned adjustInliningThreshold(const CallBase *CB) const override {
702 return 0;
703 }
704 unsigned getCallerAllocaCost(const CallBase *CB,
705 const AllocaInst *AI) const override {
706 return 0;
707 }
708
709 int getInlinerVectorBonusPercent() const override { return 150; }
710
711 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
712 TTI::UnrollingPreferences &UP,
713 OptimizationRemarkEmitter *ORE) const override {
714 // This unrolling functionality is target independent, but to provide some
715 // motivation for its intended use, for x86:
716
717 // According to the Intel 64 and IA-32 Architectures Optimization Reference
718 // Manual, Intel Core models and later have a loop stream detector (and
719 // associated uop queue) that can benefit from partial unrolling.
720 // The relevant requirements are:
721 // - The loop must have no more than 4 (8 for Nehalem and later) branches
722 // taken, and none of them may be calls.
723 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
724
725 // According to the Software Optimization Guide for AMD Family 15h
726 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
727 // and loop buffer which can benefit from partial unrolling.
728 // The relevant requirements are:
729 // - The loop must have fewer than 16 branches
730 // - The loop must have less than 40 uops in all executed loop branches
731
732 // The number of taken branches in a loop is hard to estimate here, and
733 // benchmarking has revealed that it is better not to be conservative when
734 // estimating the branch count. As a result, we'll ignore the branch limits
735 // until someone finds a case where it matters in practice.
736
737 unsigned MaxOps;
738 const TargetSubtargetInfo *ST = getST();
739 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
740 MaxOps = PartialUnrollingThreshold;
741 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
742 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
743 else
744 return;
745
746 // Scan the loop: don't unroll loops with calls.
747 for (BasicBlock *BB : L->blocks()) {
748 for (Instruction &I : *BB) {
749 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
750 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
751 if (!thisT()->isLoweredToCall(F))
752 continue;
753 }
754
755 if (ORE) {
756 ORE->emit([&]() {
757 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
758 L->getHeader())
759 << "advising against unrolling the loop because it "
760 "contains a "
761 << ore::NV("Call", &I);
762 });
763 }
764 return;
765 }
766 }
767 }
768
769 // Enable runtime and partial unrolling up to the specified size.
770 // Enable using trip count upper bound to unroll loops.
771 UP.Partial = UP.Runtime = UP.UpperBound = true;
772 UP.PartialThreshold = MaxOps;
773
774 // Avoid unrolling when optimizing for size.
775 UP.OptSizeThreshold = 0;
776 UP.PartialOptSizeThreshold = 0;
777
778 // Set number of instructions optimized when "back edge"
779 // becomes "fall through" to default value of 2.
780 UP.BEInsns = 2;
781 }
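  // Targets typically refine these defaults by delegating here first and then
  // adjusting individual fields, e.g. (sketch, hypothetical target):
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  //                                           TTI::UnrollingPreferences &UP,
  //                                           OptimizationRemarkEmitter *ORE) const {
  //     BaseT::getUnrollingPreferences(L, SE, UP, ORE);
  //     UP.PartialThreshold = 64; // target-specific tuning
  //   }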
782
783 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
784 TTI::PeelingPreferences &PP) const override {
785 PP.PeelCount = 0;
786 PP.AllowPeeling = true;
787 PP.AllowLoopNestsPeeling = false;
788 PP.PeelProfiledIterations = true;
789 }
790
791 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
792 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
793 HardwareLoopInfo &HWLoopInfo) const override {
794 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
795 }
796
797 unsigned getEpilogueVectorizationMinVF() const override {
798 return BaseT::getEpilogueVectorizationMinVF();
799 }
800
801 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
802 return BaseT::preferPredicateOverEpilogue(TFI);
803 }
804
805 TailFoldingStyle
806 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override {
807 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
808 }
809
810 std::optional<Instruction *>
811 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
812 return BaseT::instCombineIntrinsic(IC, II);
813 }
814
815 std::optional<Value *>
816 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
817 APInt DemandedMask, KnownBits &Known,
818 bool &KnownBitsComputed) const override {
819 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
820 KnownBitsComputed);
821 }
822
823 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
824 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
825 APInt &UndefElts2, APInt &UndefElts3,
826 std::function<void(Instruction *, unsigned, APInt, APInt &)>
827 SimplifyAndSetOp) const override {
828 return BaseT::simplifyDemandedVectorEltsIntrinsic(
829 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
830 SimplifyAndSetOp);
831 }
832
833 std::optional<unsigned>
834 getCacheSize(TargetTransformInfo::CacheLevel Level) const override {
835 return std::optional<unsigned>(
836 getST()->getCacheSize(static_cast<unsigned>(Level)));
837 }
838
839 std::optional<unsigned>
840 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override {
841 std::optional<unsigned> TargetResult =
842 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
843
844 if (TargetResult)
845 return TargetResult;
846
847 return BaseT::getCacheAssociativity(Level);
848 }
849
850 unsigned getCacheLineSize() const override {
851 return getST()->getCacheLineSize();
852 }
853
854 unsigned getPrefetchDistance() const override {
855 return getST()->getPrefetchDistance();
856 }
857
858 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
859 unsigned NumStridedMemAccesses,
860 unsigned NumPrefetches,
861 bool HasCall) const override {
862 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
863 NumPrefetches, HasCall);
864 }
865
866 unsigned getMaxPrefetchIterationsAhead() const override {
867 return getST()->getMaxPrefetchIterationsAhead();
868 }
869
870 bool enableWritePrefetching() const override {
871 return getST()->enableWritePrefetching();
872 }
873
874 bool shouldPrefetchAddressSpace(unsigned AS) const override {
875 return getST()->shouldPrefetchAddressSpace(AS);
876 }
877
878 /// @}
879
880 /// \name Vector TTI Implementations
881 /// @{
882
887
888 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
889 std::optional<unsigned> getVScaleForTuning() const override {
890 return std::nullopt;
891 }
892 bool isVScaleKnownToBeAPowerOfTwo() const override { return false; }
893
894 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
895 /// are set if the demanded result elements need to be inserted and/or
896 /// extracted from vectors.
897 InstructionCost
898 getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts,
899 bool Insert, bool Extract,
900 TTI::TargetCostKind CostKind,
901 bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
902 TTI::VectorInstrContext VIC =
903 TTI::VectorInstrContext::None) const override {
904 /// FIXME: a bitfield is not a reasonable abstraction for talking about
905 /// which elements are needed from a scalable vector
906 if (isa<ScalableVectorType>(InTy))
907 return InstructionCost::getInvalid();
908 auto *Ty = cast<FixedVectorType>(InTy);
909
910 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
911 (VL.empty() || VL.size() == Ty->getNumElements()) &&
912 "Vector size mismatch");
913
913
914 InstructionCost Cost = 0;
915
916 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
917 if (!DemandedElts[i])
918 continue;
919 if (Insert) {
920 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
921 Cost +=
922 thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
923 CostKind, i, nullptr, InsertedVal, VIC);
924 }
925 if (Extract)
926 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
927 CostKind, i, nullptr, nullptr, VIC);
928 }
929
930 return Cost;
931 }
932
933 bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override {
934 return false;
935 }
936
937 bool
938 isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
939 unsigned ScalarOpdIdx) const override {
940 return false;
941 }
942
943 bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
944 int OpdIdx) const override {
945 return OpdIdx == -1;
946 }
947
948 bool
949 isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
950 int RetIdx) const override {
951 return RetIdx == 0;
952 }
953
954 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
955 InstructionCost getScalarizationOverhead(
956 VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind,
957 bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
958 TTI::VectorInstrContext VIC = TTI::VectorInstrContext::None) const {
959 if (isa<ScalableVectorType>(InTy))
960 return InstructionCost::getInvalid();
961 auto *Ty = cast<FixedVectorType>(InTy);
962
963 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
964 // Use CRTP to allow target overrides
965 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
966 CostKind, ForPoisonSrc, VL, VIC);
967 }
968
969 /// Estimate the overhead of scalarizing an instruction's
970 /// operands. The (potentially vector) types to use for each
971 /// argument are passed via \p Tys.
972 InstructionCost getOperandsScalarizationOverhead(
973 ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind,
974 TTI::VectorInstrContext VIC =
975 TTI::VectorInstrContext::None) const override {
976 InstructionCost Cost = 0;
977 for (Type *Ty : Tys) {
978 // Disregard things like metadata arguments.
979 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
980 !Ty->isPtrOrPtrVectorTy())
981 continue;
982
983 if (auto *VecTy = dyn_cast<VectorType>(Ty))
984 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
985 /*Extract*/ true, CostKind,
986 /*ForPoisonSrc=*/true, {}, VIC);
987 }
988
989 return Cost;
990 }
991
992 /// Estimate the overhead of scalarizing the inputs and outputs of an
993 /// instruction, with return type RetTy and arguments Args of type Tys. If
994 /// Args are unknown (empty), then the cost associated with one argument is
995 /// added as a heuristic.
996 InstructionCost getScalarizationOverhead(VectorType *RetTy,
997 ArrayRef<const Value *> Args,
998 ArrayRef<Type *> Tys,
999 TTI::TargetCostKind CostKind) const {
1000 InstructionCost Cost = getScalarizationOverhead(
1001 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
1002 if (!Args.empty())
1003 Cost += getOperandsScalarizationOverhead(
1004 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
1005 else
1006 // When no information on arguments is provided, we add the cost
1007 // associated with one argument as a heuristic.
1008 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
1009 /*Extract*/ true, CostKind);
1010
1011 return Cost;
1012 }
1013
1014 /// Estimate the cost of type-legalization and the legalized type.
1015 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
1016 LLVMContext &C = Ty->getContext();
1017 EVT MTy = getTLI()->getValueType(DL, Ty);
1018
1019 InstructionCost Cost = 1;
1020 // We keep legalizing the type until we find a legal kind. We assume that
1021 // the only operation that costs anything is the split. After splitting
1022 // we need to handle two types.
1023 while (true) {
1024 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
1025
1026 if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
1027 // Ensure we return a sensible simple VT here, since many callers of
1028 // this function require it.
1029 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
1030 return std::make_pair(InstructionCost::getInvalid(), VT);
1031 }
1032
1033 if (LK.first == TargetLoweringBase::TypeLegal)
1034 return std::make_pair(Cost, MTy.getSimpleVT());
1035
1036 if (LK.first == TargetLoweringBase::TypeSplitVector ||
1037 LK.first == TargetLoweringBase::TypeExpandInteger)
1038 Cost *= 2;
1039
1040 // Do not loop with f128 type.
1041 if (MTy == LK.second)
1042 return std::make_pair(Cost, MTy.getSimpleVT());
1043
1044 // Keep legalizing the type.
1045 MTy = LK.second;
1046 }
1047 }
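  // Worked example (illustrative): on a target whose widest legal vector type
  // is v4i32, a <16 x i32> value is split twice (16 -> 8 -> 4 elements), so
  // this returns {4, MVT::v4i32}: the cost starts at 1 and doubles per split.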
1048
1049 unsigned getMaxInterleaveFactor(ElementCount VF) const override { return 1; }
1050
1051 InstructionCost getArithmeticInstrCost(
1052 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1053 TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
1054 TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
1055 ArrayRef<const Value *> Args = {},
1056 const Instruction *CxtI = nullptr) const override {
1057 // Check if any of the operands are vector operands.
1058 const TargetLoweringBase *TLI = getTLI();
1059 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1060 assert(ISD && "Invalid opcode");
1061
1062 // TODO: Handle more cost kinds.
1063 if (CostKind != TTI::TCK_RecipThroughput)
1064 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1065 Opd1Info, Opd2Info,
1066 Args, CxtI);
1067
1068 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1069
1070 bool IsFloat = Ty->isFPOrFPVectorTy();
1071 // Assume that floating point arithmetic operations cost twice as much as
1072 // integer operations.
1073 InstructionCost OpCost = (IsFloat ? 2 : 1);
1074
1075 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1076 // The operation is legal. Assume it costs 1.
1077 // TODO: Once we have extract/insert subvector cost we need to use them.
1078 return LT.first * OpCost;
1079 }
1080
1081 if (!TLI->isOperationExpand(ISD, LT.second)) {
1082 // If the operation is custom lowered, then assume that the code is twice
1083 // as expensive.
1084 return LT.first * 2 * OpCost;
1085 }
1086
1087 // An 'Expand' of URem and SRem is special because it may default
1088 // to expanding the operation into a sequence of sub-operations
1089 // i.e. X % Y -> X-(X/Y)*Y.
1090 if (ISD == ISD::UREM || ISD == ISD::SREM) {
1091 bool IsSigned = ISD == ISD::SREM;
1092 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
1093 LT.second) ||
1094 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
1095 LT.second)) {
1096 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1097 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
1098 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1099 InstructionCost MulCost =
1100 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1101 InstructionCost SubCost =
1102 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1103 return DivCost + MulCost + SubCost;
1104 }
1105 }
1106
1107 // We cannot scalarize scalable vectors, so return Invalid.
1108 if (isa<ScalableVectorType>(Ty))
1109 return InstructionCost::getInvalid();
1110
1111 // Else, assume that we need to scalarize this op.
1112 // TODO: If one of the types get legalized by splitting, handle this
1113 // similarly to what getCastInstrCost() does.
1114 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1115 InstructionCost Cost = thisT()->getArithmeticInstrCost(
1116 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
1117 Args, CxtI);
1118 // Return the cost of multiple scalar invocation plus the cost of
1119 // inserting and extracting the values.
1120 SmallVector<Type *> Tys(Args.size(), Ty);
1121 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1122 VTy->getNumElements() * Cost;
1123 }
1124
1125 // We don't know anything about this scalar instruction.
1126 return OpCost;
1127 }
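  // Example of the URem/SRem expansion above (illustrative): if SREM on the
  // legalized type must be expanded but SDIV is legal, the returned cost is
  // cost(sdiv) + cost(mul) + cost(sub), modelling X % Y as X - (X / Y) * Y.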
1128
1129 TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
1130 ArrayRef<int> Mask,
1131 VectorType *SrcTy, int &Index,
1132 VectorType *&SubTy) const {
1133 if (Mask.empty())
1134 return Kind;
1135 int NumDstElts = Mask.size();
1136 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1137 switch (Kind) {
1138 case TTI::SK_PermuteSingleSrc: {
1139 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
1140 return TTI::SK_Reverse;
1141 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
1142 return TTI::SK_Broadcast;
1143 if (isSplatMask(Mask, NumSrcElts, Index))
1144 return TTI::SK_Broadcast;
1145 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
1146 (Index + NumDstElts) <= NumSrcElts) {
1147 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
1148 return TTI::SK_ExtractSubvector;
1149 }
1150 break;
1151 }
1152 case TTI::SK_PermuteTwoSrc: {
1153 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1154 return improveShuffleKindFromMask(TTI::SK_PermuteSingleSrc, Mask, SrcTy,
1155 Index, SubTy);
1156 int NumSubElts;
1157 if (NumDstElts > 2 && ShuffleVectorInst::isInsertSubvectorMask(
1158 Mask, NumSrcElts, NumSubElts, Index)) {
1159 if (Index + NumSubElts > NumSrcElts)
1160 return Kind;
1161 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
1162 return TTI::SK_InsertSubvector;
1163 }
1164 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
1165 return TTI::SK_Select;
1166 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1167 return TTI::SK_Transpose;
1168 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1169 return TTI::SK_Splice;
1170 break;
1171 }
1172 case TTI::SK_Select:
1173 case TTI::SK_Reverse:
1174 case TTI::SK_Broadcast:
1175 case TTI::SK_Transpose:
1176 case TTI::SK_InsertSubvector:
1177 case TTI::SK_ExtractSubvector:
1178 case TTI::SK_Splice:
1179 break;
1180 }
1181 return Kind;
1182 }
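  // Illustrative examples for a single-source shuffle of <4 x float>: mask
  // <3,2,1,0> is refined to SK_Reverse, <2,-1,2,-1> to SK_Broadcast with
  // Index = 2 (via isSplatMask), and <1,2> to SK_ExtractSubvector at Index 1.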
1183
1184 InstructionCost
1185 getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
1186 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
1187 VectorType *SubTp, ArrayRef<const Value *> Args = {},
1188 const Instruction *CxtI = nullptr) const override {
1189 switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
1190 case TTI::SK_Broadcast:
1191 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1192 return getBroadcastShuffleOverhead(FVT, CostKind);
1193 return InstructionCost::getInvalid();
1194 case TTI::SK_Select:
1195 case TTI::SK_Splice:
1196 case TTI::SK_Reverse:
1197 case TTI::SK_Transpose:
1198 case TTI::SK_PermuteSingleSrc:
1199 case TTI::SK_PermuteTwoSrc:
1200 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1201 return getPermuteShuffleOverhead(FVT, CostKind);
1202 return InstructionCost::getInvalid();
1203 case TTI::SK_ExtractSubvector:
1204 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1205 cast<FixedVectorType>(SubTp));
1206 case TTI::SK_InsertSubvector:
1207 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1208 cast<FixedVectorType>(SubTp));
1209 }
1210 llvm_unreachable("Unknown TTI::ShuffleKind");
1211 }
1212
1213 InstructionCost
1214 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1215 TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
1216 const Instruction *I = nullptr) const override {
1217 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1218 return 0;
1219
1220 const TargetLoweringBase *TLI = getTLI();
1221 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1222 assert(ISD && "Invalid opcode");
1223 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1224 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1225
1226 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1227 TypeSize DstSize = DstLT.second.getSizeInBits();
1228 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1229 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1230
1231 switch (Opcode) {
1232 default:
1233 break;
1234 case Instruction::Trunc:
1235 // Check for NOOP conversions.
1236 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1237 return 0;
1238 [[fallthrough]];
1239 case Instruction::BitCast:
1240 // Bitcast between types that are legalized to the same type are free and
1241 // assume int to/from ptr of the same size is also free.
1242 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1243 SrcSize == DstSize)
1244 return 0;
1245 break;
1246 case Instruction::FPExt:
1247 if (I && getTLI()->isExtFree(I))
1248 return 0;
1249 break;
1250 case Instruction::ZExt:
1251 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1252 return 0;
1253 [[fallthrough]];
1254 case Instruction::SExt:
1255 if (I && getTLI()->isExtFree(I))
1256 return 0;
1257
1258 // If this is a zext/sext of a load, return 0 if the corresponding
1259 // extending load exists on target and the result type is legal.
1260 if (CCH == TTI::CastContextHint::Normal) {
1261 EVT ExtVT = EVT::getEVT(Dst);
1262 EVT LoadVT = EVT::getEVT(Src);
1263 unsigned LType =
1264 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1265 if (DstLT.first == SrcLT.first &&
1266 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1267 return 0;
1268 }
1269 break;
1270 case Instruction::AddrSpaceCast:
1271 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1272 Dst->getPointerAddressSpace()))
1273 return 0;
1274 break;
1275 }
1276
1277 auto *SrcVTy = dyn_cast<VectorType>(Src);
1278 auto *DstVTy = dyn_cast<VectorType>(Dst);
1279
1280 // If the cast is marked as legal (or promote) then assume low cost.
1281 if (SrcLT.first == DstLT.first &&
1282 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1283 return SrcLT.first;
1284
1285 // Handle scalar conversions.
1286 if (!SrcVTy && !DstVTy) {
1287 // Just check the op cost. If the operation is legal then assume it costs
1288 // 1.
1289 if (!TLI->isOperationExpand(ISD, DstLT.second))
1290 return 1;
1291
1292 // Assume that illegal scalar instruction are expensive.
1293 return 4;
1294 }
1295
1296 // Check vector-to-vector casts.
1297 if (DstVTy && SrcVTy) {
1298 // If the cast is between same-sized registers, then the check is simple.
1299 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1300
1301 // Assume that Zext is done using AND.
1302 if (Opcode == Instruction::ZExt)
1303 return SrcLT.first;
1304
1305 // Assume that sext is done using SHL and SRA.
1306 if (Opcode == Instruction::SExt)
1307 return SrcLT.first * 2;
1308
1309 // Just check the op cost. If the operation is legal then assume it
1310 // costs
1311 // 1 and multiply by the type-legalization overhead.
1312 if (!TLI->isOperationExpand(ISD, DstLT.second))
1313 return SrcLT.first * 1;
1314 }
1315
1316 // If we are legalizing by splitting, query the concrete TTI for the cost
1317 // of casting the original vector twice. We also need to factor in the
1318 // cost of the split itself. Count that as 1, to be consistent with
1319 // getTypeLegalizationCost().
1320 bool SplitSrc =
1321 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1322 TargetLowering::TypeSplitVector;
1323 bool SplitDst =
1324 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1325 TargetLowering::TypeSplitVector;
1326 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
1327 DstVTy->getElementCount().isKnownEven()) {
1328 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1329 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1330 const T *TTI = thisT();
1331 // If both types need to be split then the split is free.
1332 InstructionCost SplitCost =
1333 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1334 return SplitCost +
1335 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1336 CostKind, I));
1337 }
1338
1339 // Scalarization cost is Invalid, can't assume any num elements.
1340 if (isa<ScalableVectorType>(DstVTy))
1341 return InstructionCost::getInvalid();
1342
1343 // In other cases where the source or destination are illegal, assume
1344 // the operation will get scalarized.
1345 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1346 InstructionCost Cost = thisT()->getCastInstrCost(
1347 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1348
1349 // Return the cost of multiple scalar invocation plus the cost of
1350 // inserting and extracting the values.
1351 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1352 CostKind) +
1353 Num * Cost;
1354 }
1355
1356 // We already handled vector-to-vector and scalar-to-scalar conversions.
1357 // This
1358 // is where we handle bitcast between vectors and scalars. We need to assume
1359 // that the conversion is scalarized in one way or another.
1360 if (Opcode == Instruction::BitCast) {
1361 // Illegal bitcasts are done by storing and loading from a stack slot.
1362 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1363 /*Extract*/ true, CostKind)
1364 : 0) +
1365 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1366 /*Extract*/ false, CostKind)
1367 : 0);
1368 }
1369
1370 llvm_unreachable("Unhandled cast");
1371 }
1372
1373 InstructionCost
1374 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1375 unsigned Index,
1376 TTI::TargetCostKind CostKind) const override {
1377 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1378 CostKind, Index, nullptr, nullptr) +
1379 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1380 TTI::CastContextHint::None, CostKind);
1381 }
1382
1383 InstructionCost getCFInstrCost(unsigned Opcode,
1384 TTI::TargetCostKind CostKind,
1385 const Instruction *I = nullptr) const override {
1386 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1387 }
1388
1389 InstructionCost getCmpSelInstrCost(
1390 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1391 TTI::TargetCostKind CostKind,
1392 TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
1393 TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
1394 const Instruction *I = nullptr) const override {
1395 const TargetLoweringBase *TLI = getTLI();
1396 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1397 assert(ISD && "Invalid opcode");
1398
1399 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)
1400 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1401 Op1Info, Op2Info, I);
1402
1403 // Selects on vectors are actually vector selects.
1404 if (ISD == ISD::SELECT) {
1405 assert(CondTy && "CondTy must exist");
1406 if (CondTy->isVectorTy())
1407 ISD = ISD::VSELECT;
1408 }
1409 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1410
1411 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1412 !TLI->isOperationExpand(ISD, LT.second)) {
1413 // The operation is legal. Assume it costs 1. Multiply
1414 // by the type-legalization overhead.
1415 return LT.first * 1;
1416 }
1417
1418 // Otherwise, assume that the cast is scalarized.
1419 // TODO: If one of the types get legalized by splitting, handle this
1420 // similarly to what getCastInstrCost() does.
1421 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1422 if (isa<ScalableVectorType>(ValTy))
1423 return InstructionCost::getInvalid();
1424
1425 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1426 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1427 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1428 CostKind, Op1Info, Op2Info, I);
1429
1430 // Return the cost of multiple scalar invocation plus the cost of
1431 // inserting and extracting the values.
1432 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1433 /*Extract*/ false, CostKind) +
1434 Num * Cost;
1435 }
1436
1437 // Unknown scalar opcode.
1438 return 1;
1439 }
1440
1441 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1442 TTI::TargetCostKind CostKind,
1443 unsigned Index, const Value *Op0, const Value *Op1,
1444 TTI::VectorInstrContext VIC =
1445 TTI::VectorInstrContext::None) const override {
1446 return getRegUsageForType(Val->getScalarType());
1447 }
1448
1449 /// \param ScalarUserAndIdx encodes the information about extracts from a
1450 /// vector with 'Scalar' being the value being extracted,'User' being the user
1451 /// of the extract(nullptr if user is not known before vectorization) and
1452 /// 'Idx' being the extract lane.
1453 InstructionCost getVectorInstrCost(
1454 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1455 Value *Scalar,
1456 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
1457 TTI::VectorInstrContext VIC =
1458 TTI::VectorInstrContext::None) const override {
1459 return getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr, nullptr,
1460 VIC);
1461 }
1462
1463 InstructionCost
1464 getVectorInstrCost(const Instruction &I, Type *Val,
1465 TTI::TargetCostKind CostKind, unsigned Index,
1466 TTI::VectorInstrContext VIC =
1467 TTI::VectorInstrContext::None) const override {
1468 Value *Op0 = nullptr;
1469 Value *Op1 = nullptr;
1470 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1471 Op0 = IE->getOperand(0);
1472 Op1 = IE->getOperand(1);
1473 }
1474 // If VIC is None, compute it from the instruction
1477 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1478 Op1, VIC);
1479 }
1480
1484 unsigned Index) const override {
1485 unsigned NewIndex = -1;
1486 if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
1487 assert(Index < FVTy->getNumElements() &&
1488 "Unexpected index from end of vector");
1489 NewIndex = FVTy->getNumElements() - 1 - Index;
1490 }
1491 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1492 nullptr);
1493 }
1494
1495 InstructionCost
1496 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
1497 const APInt &DemandedDstElts,
1498 TTI::TargetCostKind CostKind) const override {
1499 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1500 "Unexpected size of DemandedDstElts.");
1501
1502 InstructionCost Cost = 0;
1503
1504 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1505 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1506
1507 // The Mask shuffling cost is extract all the elements of the Mask
1508 // and insert each of them Factor times into the wide vector:
1509 //
1510 // E.g. an interleaved group with factor 3:
1511 // %mask = icmp ult <8 x i32> %vec1, %vec2
1512 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1513 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1514 // The cost is estimated as extract all mask elements from the <8xi1> mask
1515 // vector and insert them factor times into the <24xi1> shuffled mask
1516 // vector.
1517 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1518 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1519 /*Insert*/ false,
1520 /*Extract*/ true, CostKind);
1521 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1522 /*Insert*/ true,
1523 /*Extract*/ false, CostKind);
1524
1525 return Cost;
1526 }
1527
1528 InstructionCost getMemoryOpCost(
1529 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1530 TTI::TargetCostKind CostKind,
1531 TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
1532 const Instruction *I = nullptr) const override {
1533 assert(!Src->isVoidTy() && "Invalid type");
1534 // Assume types, such as structs, are expensive.
1535 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1536 return 4;
1537 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1538
1539 // Assuming that all loads of legal types cost 1.
1540 InstructionCost Cost = LT.first;
1541 if (CostKind != TTI::TCK_RecipThroughput)
1542 return Cost;
1543
1544 const DataLayout &DL = this->getDataLayout();
1545 if (Src->isVectorTy() &&
1546 // In practice it's not currently possible to have a change in lane
1547 // length for extending loads or truncating stores so both types should
1548 // have the same scalable property.
1549 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1550 LT.second.getSizeInBits())) {
1551 // This is a vector load that legalizes to a larger type than the vector
1552 // itself. Unless the corresponding extending load or truncating store is
1553 // legal, then this will scalarize.
1554 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
1555 EVT MemVT = getTLI()->getValueType(DL, Src);
1556 if (Opcode == Instruction::Store)
1557 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1558 else
1559 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1560
1561 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1562 // This is a vector load/store for some illegal type that is scalarized.
1563 // We must account for the cost of building or decomposing the vector.
1564 Cost += getScalarizationOverhead(
1565 cast<VectorType>(Src), Opcode != Instruction::Store,
1566 Opcode == Instruction::Store, CostKind);
1567 }
1568 }
1569
1570 return Cost;
1571 }
1572
1573 InstructionCost getInterleavedMemoryOpCost(
1574 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1575 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1576 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1577
1578 // We cannot scalarize scalable vectors, so return Invalid.
1579 if (isa<ScalableVectorType>(VecTy))
1580 return InstructionCost::getInvalid();
1581
1582 auto *VT = cast<FixedVectorType>(VecTy);
1583
1584 unsigned NumElts = VT->getNumElements();
1585 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1586
1587 unsigned NumSubElts = NumElts / Factor;
1588 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1589
1590 // Firstly, the cost of load/store operation.
1591 InstructionCost Cost;
1592 if (UseMaskForCond || UseMaskForGaps) {
1593 unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
1594 : Intrinsic::masked_store;
1595 Cost = thisT()->getMemIntrinsicInstrCost(
1596 MemIntrinsicCostAttributes(IID, VecTy, Alignment, AddressSpace),
1597 CostKind);
1598 } else
1599 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1600 CostKind);
1601
1602 // Legalize the vector type, and get the legalized and unlegalized type
1603 // sizes.
1604 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1605 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1606 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1607
1608 // Scale the cost of the memory operation by the fraction of legalized
1609 // instructions that will actually be used. We shouldn't account for the
1610 // cost of dead instructions since they will be removed.
1611 //
1612 // E.g., An interleaved load of factor 8:
1613 // %vec = load <16 x i64>, <16 x i64>* %ptr
1614 // %v0 = shufflevector %vec, undef, <0, 8>
1615 //
1616 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1617 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1618 // type). The other loads are unused.
1619 //
1620 // TODO: Note that legalization can turn masked loads/stores into unmasked
1621 // (legalized) loads/stores. This can be reflected in the cost.
1622 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1623 // The number of loads of a legal type it will take to represent a load
1624 // of the unlegalized vector type.
1625 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1626
1627 // The number of elements of the unlegalized type that correspond to a
1628 // single legal instruction.
1629 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1630
1631 // Determine which legal instructions will be used.
1632 BitVector UsedInsts(NumLegalInsts, false);
1633 for (unsigned Index : Indices)
1634 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1635 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1636
1637 // Scale the cost of the load by the fraction of legal instructions that
1638 // will be used.
1639 Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
1640 }
1641
1642 // Then plus the cost of interleave operation.
1643 assert(Indices.size() <= Factor &&
1644 "Interleaved memory op has too many members");
1645
1646 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1647 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1648
1649 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1650 for (unsigned Index : Indices) {
1651 assert(Index < Factor && "Invalid index for interleaved memory op");
1652 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1653 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1654 }
1655
1656 if (Opcode == Instruction::Load) {
1657 // The interleave cost is similar to extract sub vectors' elements
1658 // from the wide vector, and insert them into sub vectors.
1659 //
1660 // E.g. An interleaved load of factor 2 (with one member of index 0):
1661 // %vec = load <8 x i32>, <8 x i32>* %ptr
1662 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1663 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1664 // <8 x i32> vector and insert them into a <4 x i32> vector.
1665 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1666 SubVT, DemandedAllSubElts,
1667 /*Insert*/ true, /*Extract*/ false, CostKind);
1668 Cost += Indices.size() * InsSubCost;
1669 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1670 /*Insert*/ false,
1671 /*Extract*/ true, CostKind);
1672 } else {
1673 // The interleave cost is extract elements from sub vectors, and
1674 // insert them into the wide vector.
1675 //
1676 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1677 // (using VF=4):
1678 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1679 // %gaps.mask = <true, true, false, true, true, false,
1680 // true, true, false, true, true, false>
1681 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1682 // i32 Align, <12 x i1> %gaps.mask
1683 // The cost is estimated as extract all elements (of actual members,
1684 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1685 // i32> vector.
1686 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1687 SubVT, DemandedAllSubElts,
1688 /*Insert*/ false, /*Extract*/ true, CostKind);
1689 Cost += ExtSubCost * Indices.size();
1690 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1691 /*Insert*/ true,
1692 /*Extract*/ false, CostKind);
1693 }
1694
1695 if (!UseMaskForCond)
1696 return Cost;
1697
1698 Type *I8Type = Type::getInt8Ty(VT->getContext());
1699
1700 Cost += thisT()->getReplicationShuffleCost(
1701 I8Type, Factor, NumSubElts,
1702 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1703 CostKind);
1704
1705 // The Gaps mask is invariant and created outside the loop, therefore the
1706 // cost of creating it is not accounted for here. However if we have both
1707 // a MaskForGaps and some other mask that guards the execution of the
1708 // memory access, we need to account for the cost of And-ing the two masks
1709 // inside the loop.
1710 if (UseMaskForGaps) {
1711 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1712 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1713 CostKind);
1714 }
1715
1716 return Cost;
1717 }
1718
1719 /// Get intrinsic cost based on arguments.
1720 InstructionCost
1721 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1722 TTI::TargetCostKind CostKind) const override {
1723 // Check for generically free intrinsics.
1725 return 0;
1726
1727 // Assume that target intrinsics are cheap.
1728 Intrinsic::ID IID = ICA.getID();
1731
1732 // VP Intrinsics should have the same cost as their non-vp counterpart.
1733 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1734 // counterpart when the vector length argument is smaller than the maximum
1735 // vector length.
1736 // TODO: Support other kinds of VPIntrinsics
1737 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1738 std::optional<unsigned> FOp =
1740 if (FOp) {
1741 if (ICA.getID() == Intrinsic::vp_load) {
1742 Align Alignment;
1743 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1744 Alignment = VPI->getPointerAlignment().valueOrOne();
1745 unsigned AS = 0;
1746 if (ICA.getArgTypes().size() > 1)
1747 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
1748 AS = PtrTy->getAddressSpace();
1749 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1750 AS, CostKind);
1751 }
1752 if (ICA.getID() == Intrinsic::vp_store) {
1753 Align Alignment;
1754 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1755 Alignment = VPI->getPointerAlignment().valueOrOne();
1756 unsigned AS = 0;
1757 if (ICA.getArgTypes().size() >= 2)
1758 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
1759 AS = PtrTy->getAddressSpace();
1760 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1761 AS, CostKind);
1762 }
1764 ICA.getID() == Intrinsic::vp_fneg) {
1765 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1766 CostKind);
1767 }
1768 if (VPCastIntrinsic::isVPCast(ICA.getID())) {
1769 return thisT()->getCastInstrCost(
1770 *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
1772 }
1773 if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
1774 // We can only handle vp_cmp intrinsics with underlying instructions.
1775 if (ICA.getInst()) {
1776 assert(FOp);
1777 auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
1778 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1779 ICA.getReturnType(),
1780 UI->getPredicate(), CostKind);
1781 }
1782 }
1783 }
1784 if (ICA.getID() == Intrinsic::vp_load_ff) {
1785 Type *RetTy = ICA.getReturnType();
1786 Type *DataTy = cast<StructType>(RetTy)->getElementType(0);
1787 Align Alignment;
1788 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1789 Alignment = VPI->getPointerAlignment().valueOrOne();
1790 return thisT()->getMemIntrinsicInstrCost(
1791 MemIntrinsicCostAttributes(ICA.getID(), DataTy, Alignment),
1792 CostKind);
1793 }
1794 if (ICA.getID() == Intrinsic::vp_scatter) {
1795 if (ICA.isTypeBasedOnly()) {
1796 IntrinsicCostAttributes MaskedScatter(
1799 ICA.getFlags());
1800 return getTypeBasedIntrinsicInstrCost(MaskedScatter, CostKind);
1801 }
1802 Align Alignment;
1803 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1804 Alignment = VPI->getPointerAlignment().valueOrOne();
1805 bool VarMask = isa<Constant>(ICA.getArgs()[2]);
1806 return thisT()->getMemIntrinsicInstrCost(
1807 MemIntrinsicCostAttributes(Intrinsic::vp_scatter,
1808 ICA.getArgTypes()[0], ICA.getArgs()[1],
1809 VarMask, Alignment, nullptr),
1810 CostKind);
1811 }
1812 if (ICA.getID() == Intrinsic::vp_gather) {
1813 if (ICA.isTypeBasedOnly()) {
1814 IntrinsicCostAttributes MaskedGather(
1817 ICA.getFlags());
1818 return getTypeBasedIntrinsicInstrCost(MaskedGather, CostKind);
1819 }
1820 Align Alignment;
1821 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1822 Alignment = VPI->getPointerAlignment().valueOrOne();
1823 bool VarMask = isa<Constant>(ICA.getArgs()[1]);
1824 return thisT()->getMemIntrinsicInstrCost(
1825 MemIntrinsicCostAttributes(Intrinsic::vp_gather,
1826 ICA.getReturnType(), ICA.getArgs()[0],
1827 VarMask, Alignment, nullptr),
1828 CostKind);
1829 }
1830
1831 if (ICA.getID() == Intrinsic::vp_select ||
1832 ICA.getID() == Intrinsic::vp_merge) {
1833 TTI::OperandValueInfo OpInfoX, OpInfoY;
1834 if (!ICA.isTypeBasedOnly()) {
1835 OpInfoX = TTI::getOperandInfo(ICA.getArgs()[0]);
1836 OpInfoY = TTI::getOperandInfo(ICA.getArgs()[1]);
1837 }
1838 return getCmpSelInstrCost(
1839 Instruction::Select, ICA.getReturnType(), ICA.getArgTypes()[0],
1840 CmpInst::BAD_ICMP_PREDICATE, CostKind, OpInfoX, OpInfoY);
1841 }
1842
1843 std::optional<Intrinsic::ID> FID =
1845
1846 // Not functionally equivalent but close enough for cost modelling.
1847 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1848 FID = Intrinsic::vector_reverse;
1849
1850 if (FID) {
1851 // Non-vp version will have same arg types except mask and vector
1852 // length.
1853 assert(ICA.getArgTypes().size() >= 2 &&
1854 "Expected VPIntrinsic to have Mask and Vector Length args and "
1855 "types");
1856
1857 ArrayRef<const Value *> NewArgs = ArrayRef(ICA.getArgs());
1858 if (!ICA.isTypeBasedOnly())
1859 NewArgs = NewArgs.drop_back(2);
1861
1862 // VPReduction intrinsics have a start value argument that their non-vp
1863 // counterparts do not have, except for the fadd and fmul non-vp
1864 // counterparts.
1866 *FID != Intrinsic::vector_reduce_fadd &&
1867 *FID != Intrinsic::vector_reduce_fmul) {
1868 if (!ICA.isTypeBasedOnly())
1869 NewArgs = NewArgs.drop_front();
1870 NewTys = NewTys.drop_front();
1871 }
1872
1873 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewArgs,
1874 NewTys, ICA.getFlags());
1875 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1876 }
1877 }
1878
1879 if (ICA.isTypeBasedOnly())
1881
1882 Type *RetTy = ICA.getReturnType();
1883
1884 ElementCount RetVF = isVectorizedTy(RetTy) ? getVectorizedTypeVF(RetTy)
1886
1887 const IntrinsicInst *I = ICA.getInst();
1888 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1889 FastMathFlags FMF = ICA.getFlags();
1890 switch (IID) {
1891 default:
1892 break;
1893
1894 case Intrinsic::powi:
1895 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1896 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1897 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1898 ShouldOptForSize)) {
1899 // The cost is modeled on the expansion performed by ExpandPowI in
1900 // SelectionDAGBuilder.
1901 APInt Exponent = RHSC->getValue().abs();
1902 unsigned ActiveBits = Exponent.getActiveBits();
1903 unsigned PopCount = Exponent.popcount();
1904 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1905 thisT()->getArithmeticInstrCost(
1906 Instruction::FMul, RetTy, CostKind);
1907 if (RHSC->isNegative())
1908 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1909 CostKind);
1910 return Cost;
1911 }
1912 }
1913 break;
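// Worked example of the formula above: powi(x, 13) has exponent
// 13 = 0b1101, so ActiveBits = 4 and PopCount = 3, giving
// (4 + 3 - 2) = 5 fmuls: three squarings plus two multiplies into the
// running product. A negative exponent adds one fdiv for the final
// reciprocal.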
1914 case Intrinsic::cttz:
1915 // FIXME: If necessary, this should go in target-specific overrides.
1916 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1918 break;
1919
1920 case Intrinsic::ctlz:
1921 // FIXME: If necessary, this should go in target-specific overrides.
1922 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1924 break;
1925
1926 case Intrinsic::memcpy:
1927 return thisT()->getMemcpyCost(ICA.getInst());
1928
1929 case Intrinsic::masked_scatter: {
1930 const Value *Mask = Args[2];
1931 bool VarMask = !isa<Constant>(Mask);
1932 Align Alignment = I->getParamAlign(1).valueOrOne();
1933 return thisT()->getMemIntrinsicInstrCost(
1934 MemIntrinsicCostAttributes(Intrinsic::masked_scatter,
1935 ICA.getArgTypes()[0], Args[1], VarMask,
1936 Alignment, I),
1937 CostKind);
1938 }
1939 case Intrinsic::masked_gather: {
1940 const Value *Mask = Args[1];
1941 bool VarMask = !isa<Constant>(Mask);
1942 Align Alignment = I->getParamAlign(0).valueOrOne();
1943 return thisT()->getMemIntrinsicInstrCost(
1944 MemIntrinsicCostAttributes(Intrinsic::masked_gather, RetTy, Args[0],
1945 VarMask, Alignment, I),
1946 CostKind);
1947 }
1948 case Intrinsic::masked_compressstore: {
1949 const Value *Data = Args[0];
1950 const Value *Mask = Args[2];
1951 Align Alignment = I->getParamAlign(1).valueOrOne();
1952 return thisT()->getMemIntrinsicInstrCost(
1953 MemIntrinsicCostAttributes(IID, Data->getType(), !isa<Constant>(Mask),
1954 Alignment, I),
1955 CostKind);
1956 }
1957 case Intrinsic::masked_expandload: {
1958 const Value *Mask = Args[1];
1959 Align Alignment = I->getParamAlign(0).valueOrOne();
1960 return thisT()->getMemIntrinsicInstrCost(
1961 MemIntrinsicCostAttributes(IID, RetTy, !isa<Constant>(Mask),
1962 Alignment, I),
1963 CostKind);
1964 }
1965 case Intrinsic::experimental_vp_strided_store: {
1966 const Value *Data = Args[0];
1967 const Value *Ptr = Args[1];
1968 const Value *Mask = Args[3];
1969 const Value *EVL = Args[4];
1970 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1971 Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
1972 Align Alignment =
1973 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1974 return thisT()->getMemIntrinsicInstrCost(
1975 MemIntrinsicCostAttributes(IID, Data->getType(), Ptr, VarMask,
1976 Alignment, I),
1977 CostKind);
1978 }
1979 case Intrinsic::experimental_vp_strided_load: {
1980 const Value *Ptr = Args[0];
1981 const Value *Mask = Args[2];
1982 const Value *EVL = Args[3];
1983 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1984 Type *EltTy = cast<VectorType>(RetTy)->getElementType();
1985 Align Alignment =
1986 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
1987 return thisT()->getMemIntrinsicInstrCost(
1988 MemIntrinsicCostAttributes(IID, RetTy, Ptr, VarMask, Alignment, I),
1989 CostKind);
1990 }
1991 case Intrinsic::stepvector: {
1992 if (isa<ScalableVectorType>(RetTy))
1994 // The cost of materialising a constant integer vector.
1996 }
1997 case Intrinsic::vector_extract: {
1998 // FIXME: Handle case where a scalable vector is extracted from a scalable
1999 // vector
2000 if (isa<ScalableVectorType>(RetTy))
2002 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
2003 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
2004 cast<VectorType>(RetTy),
2005 cast<VectorType>(Args[0]->getType()), {},
2006 CostKind, Index, cast<VectorType>(RetTy));
2007 }
2008 case Intrinsic::vector_insert: {
2009 // FIXME: Handle case where a scalable vector is inserted into a scalable
2010 // vector
2011 if (isa<ScalableVectorType>(Args[1]->getType()))
2013 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2014 return thisT()->getShuffleCost(
2016 cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
2017 cast<VectorType>(Args[1]->getType()));
2018 }
2019 case Intrinsic::vector_splice_left:
2020 case Intrinsic::vector_splice_right: {
2021 auto *COffset = dyn_cast<ConstantInt>(Args[2]);
2022 if (!COffset)
2023 break;
2024 unsigned Index = COffset->getZExtValue();
2025 return thisT()->getShuffleCost(
2027 cast<VectorType>(Args[0]->getType()), {}, CostKind,
2028 IID == Intrinsic::vector_splice_left ? Index : -Index,
2029 cast<VectorType>(RetTy));
2030 }
2031 case Intrinsic::vector_reduce_add:
2032 case Intrinsic::vector_reduce_mul:
2033 case Intrinsic::vector_reduce_and:
2034 case Intrinsic::vector_reduce_or:
2035 case Intrinsic::vector_reduce_xor:
2036 case Intrinsic::vector_reduce_smax:
2037 case Intrinsic::vector_reduce_smin:
2038 case Intrinsic::vector_reduce_fmax:
2039 case Intrinsic::vector_reduce_fmin:
2040 case Intrinsic::vector_reduce_fmaximum:
2041 case Intrinsic::vector_reduce_fminimum:
2042 case Intrinsic::vector_reduce_umax:
2043 case Intrinsic::vector_reduce_umin: {
2044 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
2046 }
2047 case Intrinsic::vector_reduce_fadd:
2048 case Intrinsic::vector_reduce_fmul: {
2050 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2052 }
2053 case Intrinsic::fshl:
2054 case Intrinsic::fshr: {
2055 const Value *X = Args[0];
2056 const Value *Y = Args[1];
2057 const Value *Z = Args[2];
2060 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
2061
2062 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2063 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2065 Cost +=
2066 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2067 Cost +=
2068 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2069 Cost += thisT()->getArithmeticInstrCost(
2070 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2071 {OpInfoZ.Kind, TTI::OP_None});
2072 Cost += thisT()->getArithmeticInstrCost(
2073 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2074 {OpInfoZ.Kind, TTI::OP_None});
2075 // Non-constant shift amounts require a modulo. If the type size is a
2076 // power of 2 then this will be converted to an and, otherwise it will
2077 // use a urem.
2078 if (!OpInfoZ.isConstant())
2079 Cost += thisT()->getArithmeticInstrCost(
2080 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2081 : BinaryOperator::URem,
2082 RetTy, CostKind, OpInfoZ,
2083 {TTI::OK_UniformConstantValue, TTI::OP_None});
2084 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
2085 if (X != Y) {
2086 Type *CondTy = RetTy->getWithNewBitWidth(1);
2087 Cost +=
2088 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2090 Cost +=
2091 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2093 }
2094 return Cost;
2095 }
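// Illustrative example: fshl(i8 %x, i8 %y, i8 3) concatenates x:y and
// keeps the top 8 bits, i.e. (x << 3) | (y >> 5). With a non-constant
// shift amount on an i32 type the modulo costed above folds to an 'and'
// with 31 (a power-of-two width); when X != Y (not a rotate) the extra
// icmp/select pair covers the shift-by-zero case.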
2096 case Intrinsic::experimental_cttz_elts: {
2097 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
2098
2099 // If we're not expanding the intrinsic then we assume this is cheap
2100 // to implement.
2101 if (!getTLI()->shouldExpandCttzElements(ArgType))
2102 return getTypeLegalizationCost(RetTy).first;
2103
2104 // TODO: The costs below reflect the expansion code in
2105 // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
2106 // favour of compile time.
2107
2108 // Find the smallest "sensible" element type to use for the expansion.
2109 bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
2110 ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
2111 if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
2112 VScaleRange = getVScaleRange(I->getCaller(), 64);
2113
2114 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2115 RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
2116 Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);
2117
2118 // Create the new vector type & get the vector length
2119 Type *NewVecTy = VectorType::get(
2120 NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
2121
2122 IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
2123 FMF);
2125 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2126
2127 Cost +=
2128 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2129 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2130 Args[0]->getType(),
2132 Cost +=
2133 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2134
2135 IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
2136 NewEltTy, NewVecTy, FMF, I, 1);
2137 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2138 Cost +=
2139 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2140
2141 return Cost;
2142 }
2143 case Intrinsic::get_active_lane_mask:
2144 case Intrinsic::experimental_vector_match:
2145 case Intrinsic::experimental_vector_histogram_add:
2146 case Intrinsic::experimental_vector_histogram_uadd_sat:
2147 case Intrinsic::experimental_vector_histogram_umax:
2148 case Intrinsic::experimental_vector_histogram_umin:
2149 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2150 case Intrinsic::modf:
2151 case Intrinsic::sincos:
2152 case Intrinsic::sincospi: {
2153 std::optional<unsigned> CallRetElementIndex;
2154 // The first element of the modf result is returned by value in the
2155 // libcall.
2156 if (ICA.getID() == Intrinsic::modf)
2157 CallRetElementIndex = 0;
2158
2159 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2160 ICA, CostKind, CallRetElementIndex))
2161 return *Cost;
2162 // Otherwise, fallback to default scalarization cost.
2163 break;
2164 }
2165 case Intrinsic::loop_dependence_war_mask:
2166 case Intrinsic::loop_dependence_raw_mask: {
2167 // Compute the cost of the expanded version of these intrinsics:
2168 //
2169 // The possible expansions are...
2170 //
2171 // loop_dependence_war_mask:
2172 // diff = (ptrB - ptrA) / eltSize
2173 // cmp = icmp sle diff, 0
2174 // upper_bound = select cmp, -1, diff
2175 // mask = get_active_lane_mask 0, upper_bound
2176 //
2177 // loop_dependence_raw_mask:
2178 // diff = (abs(ptrB - ptrA)) / eltSize
2179 // cmp = icmp eq diff, 0
2180 // upper_bound = select cmp, -1, diff
2181 // mask = get_active_lane_mask 0, upper_bound
2182 //
2183 auto *PtrTy = cast<PointerType>(ICA.getArgTypes()[0]);
2184 Type *IntPtrTy = IntegerType::getIntNTy(
2185 RetTy->getContext(), thisT()->getDataLayout().getPointerSizeInBits(
2186 PtrTy->getAddressSpace()));
2187 bool IsReadAfterWrite = IID == Intrinsic::loop_dependence_raw_mask;
2188
2190 thisT()->getArithmeticInstrCost(Instruction::Sub, IntPtrTy, CostKind);
2191 if (IsReadAfterWrite) {
2192 IntrinsicCostAttributes AbsAttrs(Intrinsic::abs, IntPtrTy, {IntPtrTy},
2193 {});
2194 Cost += thisT()->getIntrinsicInstrCost(AbsAttrs, CostKind);
2195 }
2196
2197 TTI::OperandValueInfo EltSizeOpInfo =
2198 TTI::getOperandInfo(ICA.getArgs()[2]);
2199 Cost += thisT()->getArithmeticInstrCost(Instruction::SDiv, IntPtrTy,
2200 CostKind, {}, EltSizeOpInfo);
2201
2202 Type *CondTy = IntegerType::getInt1Ty(RetTy->getContext());
2203 CmpInst::Predicate Pred =
2204 IsReadAfterWrite ? CmpInst::ICMP_EQ : CmpInst::ICMP_SLE;
2205 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CondTy,
2206 IntPtrTy, Pred, CostKind);
2207 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, IntPtrTy,
2208 CondTy, Pred, CostKind);
2209
2210 IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
2211 {IntPtrTy, IntPtrTy}, FMF);
2212 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2213 return Cost;
2214 }
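// Rough illustration of the war_mask expansion, assuming ptrB - ptrA is
// 16 bytes and the element size is 4: diff = 16 / 4 = 4, the compare is
// false, upper_bound = 4, and get_active_lane_mask(0, 4) enables only
// the first four lanes of the result for that vector iteration.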
2215 }
2216
2217 // Assume that we need to scalarize this intrinsic.
2218 // Compute the scalarization overhead based on Args for a vector
2219 // intrinsic.
2220 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2221 if (RetVF.isVector() && !RetVF.isScalable()) {
2222 ScalarizationCost = 0;
2223 if (!RetTy->isVoidTy()) {
2224 for (Type *VectorTy : getContainedTypes(RetTy)) {
2225 ScalarizationCost += getScalarizationOverhead(
2226 cast<VectorType>(VectorTy),
2227 /*Insert=*/true, /*Extract=*/false, CostKind);
2228 }
2229 }
2230 ScalarizationCost += getOperandsScalarizationOverhead(
2231 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2232 CostKind);
2233 }
2234
2235 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
2236 ScalarizationCost);
2237 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2238 }
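// A minimal usage sketch (hypothetical names; TTIRef is a
// TargetTransformInfo reference obtained from an analysis pass). A client
// costing a fabs call on a vector type VecTy would typically write:
//   IntrinsicCostAttributes ICA(Intrinsic::fabs, VecTy, {VecTy});
//   InstructionCost C = TTIRef.getIntrinsicInstrCost(
//       ICA, TargetTransformInfo::TCK_RecipThroughput);
// which routes through the hook above and ultimately into the type-based
// implementation below for intrinsics without special handling.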
2239
2240 /// Get intrinsic cost based on argument types.
2241 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
2242 /// cost of scalarizing the arguments and the return value will be computed
2243 /// based on types.
2247 Intrinsic::ID IID = ICA.getID();
2248 Type *RetTy = ICA.getReturnType();
2249 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
2250 FastMathFlags FMF = ICA.getFlags();
2251 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
2252 bool SkipScalarizationCost = ICA.skipScalarizationCost();
2253
2254 VectorType *VecOpTy = nullptr;
2255 if (!Tys.empty()) {
2256 // The vector reduction operand is operand 0 except for fadd/fmul.
2257 // Their operand 0 is a scalar start value, so the vector op is operand 1.
2258 unsigned VecTyIndex = 0;
2259 if (IID == Intrinsic::vector_reduce_fadd ||
2260 IID == Intrinsic::vector_reduce_fmul)
2261 VecTyIndex = 1;
2262 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2263 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
2264 }
2265
2266 // Library call cost - other than size, make it expensive.
2267 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
2268 unsigned ISD = 0;
2269 switch (IID) {
2270 default: {
2271 // Scalable vectors cannot be scalarized, so return Invalid.
2272 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2273 return isa<ScalableVectorType>(Ty);
2274 }))
2276
2277 // Assume that we need to scalarize this intrinsic.
2278 InstructionCost ScalarizationCost =
2279 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2280 unsigned ScalarCalls = 1;
2281 Type *ScalarRetTy = RetTy;
2282 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2283 if (!SkipScalarizationCost)
2284 ScalarizationCost = getScalarizationOverhead(
2285 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
2286 ScalarCalls = std::max(ScalarCalls,
2288 ScalarRetTy = RetTy->getScalarType();
2289 }
2290 SmallVector<Type *, 4> ScalarTys;
2291 for (Type *Ty : Tys) {
2292 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2293 if (!SkipScalarizationCost)
2294 ScalarizationCost += getScalarizationOverhead(
2295 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2296 ScalarCalls = std::max(ScalarCalls,
2298 Ty = Ty->getScalarType();
2299 }
2300 ScalarTys.push_back(Ty);
2301 }
2302 if (ScalarCalls == 1)
2303 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
2304
2305 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
2306 InstructionCost ScalarCost =
2307 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2308
2309 return ScalarCalls * ScalarCost + ScalarizationCost;
2310 }
2311 // Look for intrinsics that can be lowered directly or turned into a scalar
2312 // intrinsic call.
2313 case Intrinsic::sqrt:
2314 ISD = ISD::FSQRT;
2315 break;
2316 case Intrinsic::sin:
2317 ISD = ISD::FSIN;
2318 break;
2319 case Intrinsic::cos:
2320 ISD = ISD::FCOS;
2321 break;
2322 case Intrinsic::sincos:
2323 ISD = ISD::FSINCOS;
2324 break;
2325 case Intrinsic::sincospi:
2327 break;
2328 case Intrinsic::modf:
2329 ISD = ISD::FMODF;
2330 break;
2331 case Intrinsic::tan:
2332 ISD = ISD::FTAN;
2333 break;
2334 case Intrinsic::asin:
2335 ISD = ISD::FASIN;
2336 break;
2337 case Intrinsic::acos:
2338 ISD = ISD::FACOS;
2339 break;
2340 case Intrinsic::atan:
2341 ISD = ISD::FATAN;
2342 break;
2343 case Intrinsic::atan2:
2344 ISD = ISD::FATAN2;
2345 break;
2346 case Intrinsic::sinh:
2347 ISD = ISD::FSINH;
2348 break;
2349 case Intrinsic::cosh:
2350 ISD = ISD::FCOSH;
2351 break;
2352 case Intrinsic::tanh:
2353 ISD = ISD::FTANH;
2354 break;
2355 case Intrinsic::exp:
2356 ISD = ISD::FEXP;
2357 break;
2358 case Intrinsic::exp2:
2359 ISD = ISD::FEXP2;
2360 break;
2361 case Intrinsic::exp10:
2362 ISD = ISD::FEXP10;
2363 break;
2364 case Intrinsic::log:
2365 ISD = ISD::FLOG;
2366 break;
2367 case Intrinsic::log10:
2368 ISD = ISD::FLOG10;
2369 break;
2370 case Intrinsic::log2:
2371 ISD = ISD::FLOG2;
2372 break;
2373 case Intrinsic::ldexp:
2374 ISD = ISD::FLDEXP;
2375 break;
2376 case Intrinsic::fabs:
2377 ISD = ISD::FABS;
2378 break;
2379 case Intrinsic::canonicalize:
2381 break;
2382 case Intrinsic::minnum:
2383 ISD = ISD::FMINNUM;
2384 break;
2385 case Intrinsic::maxnum:
2386 ISD = ISD::FMAXNUM;
2387 break;
2388 case Intrinsic::minimum:
2390 break;
2391 case Intrinsic::maximum:
2393 break;
2394 case Intrinsic::minimumnum:
2396 break;
2397 case Intrinsic::maximumnum:
2399 break;
2400 case Intrinsic::copysign:
2402 break;
2403 case Intrinsic::floor:
2404 ISD = ISD::FFLOOR;
2405 break;
2406 case Intrinsic::ceil:
2407 ISD = ISD::FCEIL;
2408 break;
2409 case Intrinsic::trunc:
2410 ISD = ISD::FTRUNC;
2411 break;
2412 case Intrinsic::nearbyint:
2414 break;
2415 case Intrinsic::rint:
2416 ISD = ISD::FRINT;
2417 break;
2418 case Intrinsic::lrint:
2419 ISD = ISD::LRINT;
2420 break;
2421 case Intrinsic::llrint:
2422 ISD = ISD::LLRINT;
2423 break;
2424 case Intrinsic::round:
2425 ISD = ISD::FROUND;
2426 break;
2427 case Intrinsic::roundeven:
2429 break;
2430 case Intrinsic::lround:
2431 ISD = ISD::LROUND;
2432 break;
2433 case Intrinsic::llround:
2434 ISD = ISD::LLROUND;
2435 break;
2436 case Intrinsic::pow:
2437 ISD = ISD::FPOW;
2438 break;
2439 case Intrinsic::fma:
2440 ISD = ISD::FMA;
2441 break;
2442 case Intrinsic::fmuladd:
2443 ISD = ISD::FMA;
2444 break;
2445 case Intrinsic::experimental_constrained_fmuladd:
2447 break;
2448 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2449 case Intrinsic::lifetime_start:
2450 case Intrinsic::lifetime_end:
2451 case Intrinsic::sideeffect:
2452 case Intrinsic::pseudoprobe:
2453 case Intrinsic::arithmetic_fence:
2454 return 0;
2455 case Intrinsic::masked_store: {
2456 Type *Ty = Tys[0];
2457 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2458 return thisT()->getMemIntrinsicInstrCost(
2459 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2460 }
2461 case Intrinsic::masked_load: {
2462 Type *Ty = RetTy;
2463 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2464 return thisT()->getMemIntrinsicInstrCost(
2465 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2466 }
2467 case Intrinsic::experimental_vp_strided_store: {
2468 auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
2469 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2470 return thisT()->getMemIntrinsicInstrCost(
2471 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2472 /*VariableMask=*/true, Alignment,
2473 ICA.getInst()),
2474 CostKind);
2475 }
2476 case Intrinsic::experimental_vp_strided_load: {
2477 auto *Ty = cast<VectorType>(ICA.getReturnType());
2478 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2479 return thisT()->getMemIntrinsicInstrCost(
2480 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2481 /*VariableMask=*/true, Alignment,
2482 ICA.getInst()),
2483 CostKind);
2484 }
2485 case Intrinsic::vector_reduce_add:
2486 case Intrinsic::vector_reduce_mul:
2487 case Intrinsic::vector_reduce_and:
2488 case Intrinsic::vector_reduce_or:
2489 case Intrinsic::vector_reduce_xor:
2490 return thisT()->getArithmeticReductionCost(
2491 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2492 CostKind);
2493 case Intrinsic::vector_reduce_fadd:
2494 case Intrinsic::vector_reduce_fmul:
2495 return thisT()->getArithmeticReductionCost(
2496 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2497 case Intrinsic::vector_reduce_smax:
2498 case Intrinsic::vector_reduce_smin:
2499 case Intrinsic::vector_reduce_umax:
2500 case Intrinsic::vector_reduce_umin:
2501 case Intrinsic::vector_reduce_fmax:
2502 case Intrinsic::vector_reduce_fmin:
2503 case Intrinsic::vector_reduce_fmaximum:
2504 case Intrinsic::vector_reduce_fminimum:
2505 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2506 VecOpTy, ICA.getFlags(), CostKind);
2507 case Intrinsic::experimental_vector_match: {
2508 auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
2509 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
2510 unsigned SearchSize = NeedleTy->getNumElements();
2511
2512 // If we're not expanding the intrinsic then we assume this is cheap to
2513 // implement.
2514 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2515 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2516 return getTypeLegalizationCost(RetTy).first;
2517
2518 // Approximate the cost based on the expansion code in
2519 // SelectionDAGBuilder.
2521 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2522 CostKind, 1, nullptr, nullptr);
2523 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2524 CostKind, 0, nullptr, nullptr);
2525 Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, SearchTy, {},
2526 CostKind, 0, nullptr);
2527 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2529 Cost +=
2530 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2531 Cost *= SearchSize;
2532 Cost +=
2533 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2534 return Cost;
2535 }
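// Illustrative tally: for a 4-element needle the loop modeled above
// charges, per needle element, an extractelement from the needle, an
// insertelement plus broadcast shuffle to splat it across the search
// vector, an icmp and an 'or' into the running result; that sum is
// multiplied by SearchSize = 4 and a final 'and' with the mask operand
// is added.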
2536 case Intrinsic::vector_reverse:
2537 return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
2538 cast<VectorType>(ICA.getArgTypes()[0]), {},
2539 CostKind, 0, cast<VectorType>(RetTy));
2540 case Intrinsic::experimental_vector_histogram_add:
2541 case Intrinsic::experimental_vector_histogram_uadd_sat:
2542 case Intrinsic::experimental_vector_histogram_umax:
2543 case Intrinsic::experimental_vector_histogram_umin: {
2545 Type *EltTy = ICA.getArgTypes()[1];
2546
2547 // Targets with scalable vectors must handle this on their own.
2548 if (!PtrsTy)
2550
2551 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2553 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2554 CostKind, 1, nullptr, nullptr);
2555 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2556 CostKind);
2557 switch (IID) {
2558 default:
2559 llvm_unreachable("Unhandled histogram update operation.");
2560 case Intrinsic::experimental_vector_histogram_add:
2561 Cost +=
2562 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2563 break;
2564 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2565 IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
2566 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2567 break;
2568 }
2569 case Intrinsic::experimental_vector_histogram_umax: {
2570 IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
2571 Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
2572 break;
2573 }
2574 case Intrinsic::experimental_vector_histogram_umin: {
2575 IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
2576 Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
2577 break;
2578 }
2579 }
2580 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2581 CostKind);
2582 Cost *= PtrsTy->getNumElements();
2583 return Cost;
2584 }
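// Illustrative tally, assuming a <4 x ptr> bucket vector: the scalarised
// model above charges, per lane, an extractelement of the pointer, a
// scalar load, one update (add, uadd.sat, umax or umin) and a scalar
// store, and then multiplies the per-lane sum by the four lanes.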
2585 case Intrinsic::get_active_lane_mask: {
2586 Type *ArgTy = ICA.getArgTypes()[0];
2587 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2588 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2589
2590 // If we're not expanding the intrinsic then we assume this is cheap
2591 // to implement.
2592 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2593 return getTypeLegalizationCost(RetTy).first;
2594
2595 // Create the expanded types that will be used to calculate the uadd_sat
2596 // operation.
2597 Type *ExpRetTy =
2598 VectorType::get(ArgTy, cast<VectorType>(RetTy)->getElementCount());
2599 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
2601 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2602 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2604 return Cost;
2605 }
2606 case Intrinsic::experimental_memset_pattern:
2607 // This cost is set to match the cost of the memset_pattern16 libcall.
2608 // It should likely be re-evaluated after migration to this intrinsic
2609 // is complete.
2610 return TTI::TCC_Basic * 4;
2611 case Intrinsic::abs:
2612 ISD = ISD::ABS;
2613 break;
2614 case Intrinsic::fshl:
2615 ISD = ISD::FSHL;
2616 break;
2617 case Intrinsic::fshr:
2618 ISD = ISD::FSHR;
2619 break;
2620 case Intrinsic::smax:
2621 ISD = ISD::SMAX;
2622 break;
2623 case Intrinsic::smin:
2624 ISD = ISD::SMIN;
2625 break;
2626 case Intrinsic::umax:
2627 ISD = ISD::UMAX;
2628 break;
2629 case Intrinsic::umin:
2630 ISD = ISD::UMIN;
2631 break;
2632 case Intrinsic::sadd_sat:
2633 ISD = ISD::SADDSAT;
2634 break;
2635 case Intrinsic::ssub_sat:
2636 ISD = ISD::SSUBSAT;
2637 break;
2638 case Intrinsic::uadd_sat:
2639 ISD = ISD::UADDSAT;
2640 break;
2641 case Intrinsic::usub_sat:
2642 ISD = ISD::USUBSAT;
2643 break;
2644 case Intrinsic::smul_fix:
2645 ISD = ISD::SMULFIX;
2646 break;
2647 case Intrinsic::umul_fix:
2648 ISD = ISD::UMULFIX;
2649 break;
2650 case Intrinsic::sadd_with_overflow:
2651 ISD = ISD::SADDO;
2652 break;
2653 case Intrinsic::ssub_with_overflow:
2654 ISD = ISD::SSUBO;
2655 break;
2656 case Intrinsic::uadd_with_overflow:
2657 ISD = ISD::UADDO;
2658 break;
2659 case Intrinsic::usub_with_overflow:
2660 ISD = ISD::USUBO;
2661 break;
2662 case Intrinsic::smul_with_overflow:
2663 ISD = ISD::SMULO;
2664 break;
2665 case Intrinsic::umul_with_overflow:
2666 ISD = ISD::UMULO;
2667 break;
2668 case Intrinsic::fptosi_sat:
2669 case Intrinsic::fptoui_sat: {
2670 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
2671 std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
2672
2673 // For cast instructions, types are different between source and
2674 // destination. Also need to check if the source type can be legalized.
2675 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2677 ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
2679 break;
2680 }
2681 case Intrinsic::ctpop:
2682 ISD = ISD::CTPOP;
2683 // In case of legalization use TCC_Expensive. This is cheaper than a
2684 // library call but still not a cheap instruction.
2685 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2686 break;
2687 case Intrinsic::ctlz:
2688 ISD = ISD::CTLZ;
2689 break;
2690 case Intrinsic::cttz:
2691 ISD = ISD::CTTZ;
2692 break;
2693 case Intrinsic::bswap:
2694 ISD = ISD::BSWAP;
2695 break;
2696 case Intrinsic::bitreverse:
2698 break;
2699 case Intrinsic::ucmp:
2700 ISD = ISD::UCMP;
2701 break;
2702 case Intrinsic::scmp:
2703 ISD = ISD::SCMP;
2704 break;
2705 }
2706
2707 auto *ST = dyn_cast<StructType>(RetTy);
2708 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2709 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);
2710
2711 const TargetLoweringBase *TLI = getTLI();
2712
2713 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2714 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2715 TLI->isFAbsFree(LT.second)) {
2716 return 0;
2717 }
2718
2719 // The operation is legal. Assume it costs 1.
2720 // If the type is split to multiple registers, assume that there is some
2721 // overhead to this.
2722 // TODO: Once we have extract/insert subvector cost we need to use them.
2723 if (LT.first > 1)
2724 return (LT.first * 2);
2725 else
2726 return (LT.first * 1);
2727 } else if (TLI->isOperationCustom(ISD, LT.second)) {
2728 // If the operation is custom lowered then assume
2729 // that the code is twice as expensive.
2730 return (LT.first * 2);
2731 }
2732
2733 switch (IID) {
2734 case Intrinsic::fmuladd: {
2735 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2736 // point mul followed by an add.
2737
2738 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2739 CostKind) +
2740 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2741 CostKind);
2742 }
2743 case Intrinsic::experimental_constrained_fmuladd: {
2744 IntrinsicCostAttributes FMulAttrs(
2745 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2746 IntrinsicCostAttributes FAddAttrs(
2747 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2748 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2749 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2750 }
2751 case Intrinsic::smin:
2752 case Intrinsic::smax:
2753 case Intrinsic::umin:
2754 case Intrinsic::umax: {
2755 // minmax(X,Y) = select(icmp(X,Y),X,Y)
2756 Type *CondTy = RetTy->getWithNewBitWidth(1);
2757 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2758 CmpInst::Predicate Pred =
2759 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2761 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2762 Pred, CostKind);
2763 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2764 Pred, CostKind);
2765 return Cost;
2766 }
2767 case Intrinsic::sadd_with_overflow:
2768 case Intrinsic::ssub_with_overflow: {
2769 Type *SumTy = RetTy->getContainedType(0);
2770 Type *OverflowTy = RetTy->getContainedType(1);
2771 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2772 ? BinaryOperator::Add
2773 : BinaryOperator::Sub;
2774
2775 // Add:
2776 // Overflow -> (Result < LHS) ^ (RHS < 0)
2777 // Sub:
2778 // Overflow -> (Result < LHS) ^ (RHS > 0)
2780 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2781 Cost +=
2782 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2784 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2785 CostKind);
2786 return Cost;
2787 }
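// Worked example: i8 sadd.with.overflow(127, 1) wraps to -128, so
// (Result < LHS) is true while (RHS < 0) is false; their xor is true and
// the overflow flag is set, matching the formula above.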
2788 case Intrinsic::uadd_with_overflow:
2789 case Intrinsic::usub_with_overflow: {
2790 Type *SumTy = RetTy->getContainedType(0);
2791 Type *OverflowTy = RetTy->getContainedType(1);
2792 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2793 ? BinaryOperator::Add
2794 : BinaryOperator::Sub;
2795 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2798
2800 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2801 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2802 OverflowTy, Pred, CostKind);
2803 return Cost;
2804 }
2805 case Intrinsic::smul_with_overflow:
2806 case Intrinsic::umul_with_overflow: {
2807 Type *MulTy = RetTy->getContainedType(0);
2808 Type *OverflowTy = RetTy->getContainedType(1);
2809 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2810 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2811 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2812
2813 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2815
2817 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2818 Cost +=
2819 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2820 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2821 CCH, CostKind);
2822 Cost += thisT()->getArithmeticInstrCost(
2823 Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2825
2826 if (IsSigned)
2827 Cost += thisT()->getArithmeticInstrCost(
2828 Instruction::AShr, MulTy, CostKind,
2831
2832 Cost += thisT()->getCmpSelInstrCost(
2833 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2834 return Cost;
2835 }
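// Rough sketch of the expansion being costed, for an unsigned i8
// umul.with.overflow(%a, %b):
//   %az  = zext i8 %a to i16
//   %bz  = zext i8 %b to i16
//   %m   = mul i16 %az, %bz
//   %lo  = trunc i16 %m to i8        ; multiplication result
//   %hs  = lshr i16 %m, 8
//   %hi  = trunc i16 %hs to i8       ; high half
//   %ovf = icmp ne i8 %hi, 0         ; overflow flag
// The signed form sign-extends instead and also compares the high half
// against the sign bits of the low half, which is the extra AShr charged
// above.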
2836 case Intrinsic::sadd_sat:
2837 case Intrinsic::ssub_sat: {
2838 // Assume a default expansion.
2839 Type *CondTy = RetTy->getWithNewBitWidth(1);
2840
2841 Type *OpTy = StructType::create({RetTy, CondTy});
2842 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2843 ? Intrinsic::sadd_with_overflow
2844 : Intrinsic::ssub_with_overflow;
2846
2847 // SatMax -> Overflow && SumDiff < 0
2848 // SatMin -> Overflow && SumDiff >= 0
2850 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2851 nullptr, ScalarizationCostPassed);
2852 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2853 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2854 Pred, CostKind);
2855 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2856 CondTy, Pred, CostKind);
2857 return Cost;
2858 }
2859 case Intrinsic::uadd_sat:
2860 case Intrinsic::usub_sat: {
2861 Type *CondTy = RetTy->getWithNewBitWidth(1);
2862
2863 Type *OpTy = StructType::create({RetTy, CondTy});
2864 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2865 ? Intrinsic::uadd_with_overflow
2866 : Intrinsic::usub_with_overflow;
2867
2869 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2870 nullptr, ScalarizationCostPassed);
2871 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2872 Cost +=
2873 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2875 return Cost;
2876 }
2877 case Intrinsic::smul_fix:
2878 case Intrinsic::umul_fix: {
2879 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2880 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2881
2882 unsigned ExtOp =
2883 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2885
2887 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2888 Cost +=
2889 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2890 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2891 CCH, CostKind);
2892 Cost += thisT()->getArithmeticInstrCost(
2893 Instruction::LShr, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2895 Cost += thisT()->getArithmeticInstrCost(
2896 Instruction::Shl, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2898 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2899 return Cost;
2900 }
2901 case Intrinsic::abs: {
2902 // abs(X) = select(icmp(X,0),X,sub(0,X))
2903 Type *CondTy = RetTy->getWithNewBitWidth(1);
2906 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2907 Pred, CostKind);
2908 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2909 Pred, CostKind);
2910 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2911 Cost += thisT()->getArithmeticInstrCost(
2912 BinaryOperator::Sub, RetTy, CostKind,
2914 return Cost;
2915 }
2916 case Intrinsic::fshl:
2917 case Intrinsic::fshr: {
2918 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2919 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2920 Type *CondTy = RetTy->getWithNewBitWidth(1);
2922 Cost +=
2923 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2924 Cost +=
2925 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2926 Cost +=
2927 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2928 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2929 CostKind);
2930 // Non-constant shift amounts require a modulo. If the type size is a
2931 // power of 2 then this will be converted to an and, otherwise it will
2932 // use a urem.
2933 Cost += thisT()->getArithmeticInstrCost(
2934 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2935 : BinaryOperator::URem,
2936 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2937 {TTI::OK_UniformConstantValue, TTI::OP_None});
2938 // Shift-by-zero handling.
2939 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2941 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2943 return Cost;
2944 }
2945 case Intrinsic::fptosi_sat:
2946 case Intrinsic::fptoui_sat: {
2947 if (Tys.empty())
2948 break;
2949 Type *FromTy = Tys[0];
2950 bool IsSigned = IID == Intrinsic::fptosi_sat;
2951
2953 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2954 {FromTy, FromTy});
2955 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2956 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2957 {FromTy, FromTy});
2958 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2959 Cost += thisT()->getCastInstrCost(
2960 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2962 if (IsSigned) {
2963 Type *CondTy = RetTy->getWithNewBitWidth(1);
2964 Cost += thisT()->getCmpSelInstrCost(
2965 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2966 Cost += thisT()->getCmpSelInstrCost(
2967 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2968 }
2969 return Cost;
2970 }
2971 case Intrinsic::ucmp:
2972 case Intrinsic::scmp: {
2973 Type *CmpTy = Tys[0];
2974 Type *CondTy = RetTy->getWithNewBitWidth(1);
2976 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2978 CostKind) +
2979 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2981 CostKind);
2982
2983 EVT VT = TLI->getValueType(DL, CmpTy, true);
2985 // x < y ? -1 : (x > y ? 1 : 0)
2986 Cost += 2 * thisT()->getCmpSelInstrCost(
2987 BinaryOperator::Select, RetTy, CondTy,
2989 } else {
2990 // zext(x > y) - zext(x < y)
2991 Cost +=
2992 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2994 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2995 CostKind);
2996 }
2997 return Cost;
2998 }
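// Worked example of the subtraction form: scmp(3, 5) computes
// zext(3 > 5) - zext(3 < 5) = 0 - 1 = -1, scmp(5, 3) gives 1 - 0 = 1 and
// scmp(3, 3) gives 0, matching the documented -1/0/+1 result.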
2999 case Intrinsic::maximumnum:
3000 case Intrinsic::minimumnum: {
3001 // On platforms that support FMAXNUM_IEEE/FMINNUM_IEEE, we expand
3002 // maximumnum/minimumnum to
3003 // ARG0 = fcanonicalize ARG0, ARG0 // to quiet ARG0
3004 // ARG1 = fcanonicalize ARG1, ARG1 // to quiet ARG1
3005 // RESULT = MAXNUM_IEEE ARG0, ARG1 // or MINNUM_IEEE
3006 // FIXME: In LangRef, we claimed FMAXNUM has the same behaviour as
3007 // FMAXNUM_IEEE, but the backend hasn't migrated the code yet.
3008 // Eventually, we will remove FMAXNUM_IEEE and FMINNUM_IEEE.
3009 int IeeeISD =
3010 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
3011 if (TLI->isOperationLegal(IeeeISD, LT.second)) {
3012 IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
3013 RetTy, Tys[0]);
3014 InstructionCost FCanonicalizeCost =
3015 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
3016 return LT.first + FCanonicalizeCost * 2;
3017 }
3018 break;
3019 }
3020 default:
3021 break;
3022 }
3023
3024 // Else, assume that we need to scalarize this intrinsic. For math builtins
3025 // this will emit a costly libcall, adding call overhead and spills. Make it
3026 // very expensive.
3027 if (isVectorizedTy(RetTy)) {
3028 ArrayRef<Type *> RetVTys = getContainedTypes(RetTy);
3029
3030 // Scalable vectors cannot be scalarized, so return Invalid.
3031 if (any_of(concat<Type *const>(RetVTys, Tys),
3032 [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
3034
3035 InstructionCost ScalarizationCost = ScalarizationCostPassed;
3036 if (!SkipScalarizationCost) {
3037 ScalarizationCost = 0;
3038 for (Type *RetVTy : RetVTys) {
3039 ScalarizationCost += getScalarizationOverhead(
3040 cast<VectorType>(RetVTy), /*Insert=*/true,
3041 /*Extract=*/false, CostKind);
3042 }
3043 }
3044
3045 unsigned ScalarCalls = getVectorizedTypeVF(RetTy).getFixedValue();
3046 SmallVector<Type *, 4> ScalarTys;
3047 for (Type *Ty : Tys) {
3048 if (Ty->isVectorTy())
3049 Ty = Ty->getScalarType();
3050 ScalarTys.push_back(Ty);
3051 }
3052 IntrinsicCostAttributes Attrs(IID, toScalarizedTy(RetTy), ScalarTys, FMF);
3053 InstructionCost ScalarCost =
3054 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
3055 for (Type *Ty : Tys) {
3056 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
3057 if (!ICA.skipScalarizationCost())
3058 ScalarizationCost += getScalarizationOverhead(
3059 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
3060 ScalarCalls = std::max(ScalarCalls,
3062 }
3063 }
3064 return ScalarCalls * ScalarCost + ScalarizationCost;
3065 }
3066
3067 // This is going to be turned into a library call, make it expensive.
3068 return SingleCallCost;
3069 }
3070
3071 /// Get memory intrinsic cost based on arguments.
3074 TTI::TargetCostKind CostKind) const override {
3075 unsigned Id = MICA.getID();
3076 Type *DataTy = MICA.getDataType();
3077 bool VariableMask = MICA.getVariableMask();
3078 Align Alignment = MICA.getAlignment();
3079
3080 switch (Id) {
3081 case Intrinsic::experimental_vp_strided_load:
3082 case Intrinsic::experimental_vp_strided_store: {
3083 unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
3084 ? Instruction::Load
3085 : Instruction::Store;
3086 // For a target without strided memory operations (or for an illegal
3087 // operation type on one which does), assume we lower to a gather/scatter
3088 // operation. (Which may in turn be scalarized.)
3089 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3090 VariableMask, true, CostKind);
3091 }
3092 case Intrinsic::masked_scatter:
3093 case Intrinsic::masked_gather:
3094 case Intrinsic::vp_scatter:
3095 case Intrinsic::vp_gather: {
3096 unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
3097 MICA.getID() == Intrinsic::vp_gather)
3098 ? Instruction::Load
3099 : Instruction::Store;
3100
3101 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3102 VariableMask, true, CostKind);
3103 }
3104 case Intrinsic::vp_load:
3105 case Intrinsic::vp_store:
3107 case Intrinsic::masked_load:
3108 case Intrinsic::masked_store: {
3109 unsigned Opcode =
3110 Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
3111 // TODO: Pass on AddressSpace when we have test coverage.
3112 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
3113 CostKind);
3114 }
3115 case Intrinsic::masked_compressstore:
3116 case Intrinsic::masked_expandload: {
3117 unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
3118 ? Instruction::Load
3119 : Instruction::Store;
3120 // Treat expand load/compress store as gather/scatter operation.
3121 // TODO: implement more precise cost estimation for these intrinsics.
3122 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3123 VariableMask,
3124 /*IsGatherScatter*/ true, CostKind);
3125 }
3126 case Intrinsic::vp_load_ff:
3128 default:
3129 llvm_unreachable("unexpected intrinsic");
3130 }
3131 }
3132
3133 /// Compute a cost of the given call instruction.
3134 ///
3135 /// Compute the cost of calling function F with return type RetTy and
3136 /// argument types Tys. F might be nullptr, in this case the cost of an
3137 /// arbitrary call with the specified signature will be returned.
3138 /// This is used, for instance, when we estimate call of a vector
3139 /// counterpart of the given function.
3140 /// \param F Called function, might be nullptr.
3141 /// \param RetTy Return value types.
3142 /// \param Tys Argument types.
3143 /// \returns The cost of Call instruction.
3146 TTI::TargetCostKind CostKind) const override {
3147 return 10;
3148 }
3149
3150 unsigned getNumberOfParts(Type *Tp) const override {
3151 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
3152 if (!LT.first.isValid())
3153 return 0;
3154 // Try to find actual number of parts for non-power-of-2 elements as
3155 // ceil(num-of-elements/num-of-subtype-elements).
3156 if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
3157 FTp && LT.second.isFixedLengthVector() &&
3158 !has_single_bit(FTp->getNumElements())) {
3159 if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
3160 EVT(LT.second).getTypeForEVT(Tp->getContext()));
3161 SubTp && SubTp->getElementType() == FTp->getElementType())
3162 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3163 }
3164 return LT.first.getValue();
3165 }
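// Worked example, assuming a target whose widest legal vector is
// <4 x i32>: a <9 x i32> type is widened to <16 x i32> and split, so
// LT.first reports 4 registers, but only divideCeil(9, 4) = 3 of those
// parts carry payload elements, which is the value returned above.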
3166
3169 TTI::TargetCostKind) const override {
3170 return 0;
3171 }
3172
3173 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
3174 /// We're assuming that reduction operations are performed in the following way:
3175 ///
3176 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
3177 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
3178 /// \----------------v-------------/ \----------v------------/
3179 /// n/2 elements n/2 elements
3180 /// %red1 = op <n x t> %val, <n x t> val1
3181 /// After this operation we have a vector %red1 where only the first n/2
3182 /// elements are meaningful, the second n/2 elements are undefined and can be
3183 /// dropped. All other operations are actually working with the vector of
3184 /// length n/2, not n, though the real vector length is still n.
3185 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
3186 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
3187 /// \----------------v-------------/ \----------v------------/
3188 /// n/4 elements 3*n/4 elements
3189 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
3190 /// length n/2, the resulting vector has length n/4 etc.
3191 ///
3192 /// The cost model should take into account that the actual length of the
3193 /// vector is reduced on each iteration.
3196 // Targets must implement a default value for the scalable case, since
3197 // we don't know how many lanes the vector has.
3200
3201 Type *ScalarTy = Ty->getElementType();
3202 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3203 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3204 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
3205 NumVecElts >= 2) {
3206 // Or reduction for i1 is represented as:
3207 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3208 // %res = cmp ne iReduxWidth %val, 0
3209 // And reduction for i1 is represented as:
3210 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3211 // %res = cmp eq iReduxWidth %val, -1 (i.e. all ones)
3212 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
3213 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3215 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3218 }
3219 unsigned NumReduxLevels = Log2_32(NumVecElts);
3220 InstructionCost ArithCost = 0;
3221 InstructionCost ShuffleCost = 0;
3222 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3223 unsigned LongVectorCount = 0;
3224 unsigned MVTLen =
3225 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3226 while (NumVecElts > MVTLen) {
3227 NumVecElts /= 2;
3228 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3229 ShuffleCost += thisT()->getShuffleCost(
3230 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3231 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3232 Ty = SubTy;
3233 ++LongVectorCount;
3234 }
3235
3236 NumReduxLevels -= LongVectorCount;
3237
3238 // The minimal length of the vector is limited by the real length of vector
3239 // operations performed on the current platform. That's why several final
3240 // reduction operations are performed on the vectors with the same
3241 // architecture-dependent length.
3242
3243 // By default reductions need one shuffle per reduction level.
3244 ShuffleCost +=
3245 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3246 Ty, {}, CostKind, 0, Ty);
3247 ArithCost +=
3248 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3249 return ShuffleCost + ArithCost +
3250 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3251 CostKind, 0, nullptr, nullptr);
3252 }
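// Worked example, assuming an <8 x i32> add reduction whose legal MVT is
// <4 x i32>: one extract-subvector shuffle plus one <4 x i32> add bring
// the vector down to the legal width, the remaining
// NumReduxLevels = log2(8) - 1 = 2 levels each add a single-source
// permute and a <4 x i32> add, and the result is read out with one
// extractelement, for three shuffles, three adds and one extract in total.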
3253
3254 /// Try to calculate the cost of performing strict (in-order) reductions,
3255 /// which involves doing a sequence of floating point additions in lane
3256 /// order, starting with an initial value. For example, consider a scalar
3257 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
3258 ///
3259 /// Vector = <float %v0, float %v1, float %v2, float %v3>
3260 ///
3261 /// %add1 = %InitVal + %v0
3262 /// %add2 = %add1 + %v1
3263 /// %add3 = %add2 + %v2
3264 /// %add4 = %add3 + %v3
3265 ///
3266 /// As a simple estimate we can say the cost of such a reduction is 4 times
3267 /// the cost of a scalar FP addition. We can only estimate the costs for
3268 /// fixed-width vectors here because for scalable vectors we do not know the
3269 /// runtime number of operations.
3272 // Targets must implement a default value for the scalable case, since
3273 // we don't know how many lanes the vector has.
3276
3277 auto *VTy = cast<FixedVectorType>(Ty);
3279 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
3280 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
3281 Opcode, VTy->getElementType(), CostKind);
3282 ArithCost *= VTy->getNumElements();
3283
3284 return ExtractCost + ArithCost;
3285 }
3286
3289 std::optional<FastMathFlags> FMF,
3290 TTI::TargetCostKind CostKind) const override {
3291 assert(Ty && "Unknown reduction vector type");
3293 return getOrderedReductionCost(Opcode, Ty, CostKind);
3294 return getTreeReductionCost(Opcode, Ty, CostKind);
3295 }
3296
3297 /// Try to calculate op costs for min/max reduction operations.
3298 /// \param CondTy Conditional type for the Select instruction.
3301 TTI::TargetCostKind CostKind) const override {
3302 // Targets must implement a default value for the scalable case, since
3303 // we don't know how many lanes the vector has.
3306
3307 Type *ScalarTy = Ty->getElementType();
3308 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3309 unsigned NumReduxLevels = Log2_32(NumVecElts);
3310 InstructionCost MinMaxCost = 0;
3311 InstructionCost ShuffleCost = 0;
3312 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3313 unsigned LongVectorCount = 0;
3314 unsigned MVTLen =
3315 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3316 while (NumVecElts > MVTLen) {
3317 NumVecElts /= 2;
3318 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3319
3320 ShuffleCost += thisT()->getShuffleCost(
3321 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3322
3323 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
3324 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
3325 Ty = SubTy;
3326 ++LongVectorCount;
3327 }
3328
3329 NumReduxLevels -= LongVectorCount;
3330
3331 // The minimal length of the vector is limited by the real length of vector
3332 // operations performed on the current platform. That's why several final
3334 // reduction operations are performed on the vectors with the same
3334 // architecture-dependent length.
3335 ShuffleCost +=
3336 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3337 Ty, {}, CostKind, 0, Ty);
3338 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
3339 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
3340 // The last min/max should be in vector registers and we counted it above.
3341 // So just need a single extractelement.
3342 return ShuffleCost + MinMaxCost +
3343 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3344 CostKind, 0, nullptr, nullptr);
3345 }
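// Rough worked illustration (not from the source, assuming v4i32 is the
// widest legal vector): an smin reduction over <8 x i32> is costed as one
// SK_ExtractSubvector shuffle plus one smin intrinsic call on <4 x i32>,
// then two SK_PermuteSingleSrc shuffles plus two more <4 x i32> smin calls,
// and finally one extractelement of lane 0.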
3346
3347  InstructionCost
3348 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
3349 VectorType *Ty, std::optional<FastMathFlags> FMF,
3350 TTI::TargetCostKind CostKind) const override {
3351 if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
3352 FTy && IsUnsigned && Opcode == Instruction::Add &&
3353 FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
3354 // Represent vector_reduce_add(ZExt(<n x i1>)) as
3355 // ZExtOrTrunc(ctpop(bitcast <n x i1> to iN)).
3356 auto *IntTy =
3357 IntegerType::get(ResTy->getContext(), FTy->getNumElements());
3358 IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy},
3359 FMF ? *FMF : FastMathFlags());
3360 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3361                                      TTI::CastContextHint::None, CostKind) +
3362 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3363 }
3364 // Without any native support, this is equivalent to the cost of
3365 // vecreduce.opcode(ext(Ty A)).
3366 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3367 InstructionCost RedCost =
3368 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3369 InstructionCost ExtCost = thisT()->getCastInstrCost(
3370 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3371        TTI::CastContextHint::None, CostKind);
3372
3373 return RedCost + ExtCost;
3374 }
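// Rough illustration (not from the source): vecreduce.add(zext <8 x i1> to
// <8 x i32>) hits the special case above and is costed as a bitcast of
// <8 x i1> to i8 plus a ctpop on i8 (the final ZExtOrTrunc of the scalar
// result is not costed separately); any other extended reduction is costed
// as the reduction at the extended type plus one zext/sext of the source.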
3375
3376  InstructionCost
3377 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
3378 VectorType *Ty,
3379 TTI::TargetCostKind CostKind) const override {
3380 // Without any native support, this is equivalent to the cost of
3381 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
3382 // vecreduce.add(mul(A, B)).
3383 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3384 "The reduction opcode is expected to be Add or Sub.");
3385 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3386 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
3387 RedOpcode, ExtTy, std::nullopt, CostKind);
3388 InstructionCost ExtCost = thisT()->getCastInstrCost(
3389 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3390        TTI::CastContextHint::None, CostKind);
3391
3392 InstructionCost MulCost =
3393 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3394
3395 return RedCost + MulCost + 2 * ExtCost;
3396 }
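// Rough illustration (not from the source): a dot-product style pattern such
// as vecreduce.add(mul(zext <16 x i8> %a to <16 x i32>,
//                      zext <16 x i8> %b to <16 x i32>))
// is priced here as one add reduction over <16 x i32>, one <16 x i32> mul and
// two zexts; targets with native multiply-accumulate reduction instructions
// are expected to override this hook with a cheaper estimate.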
3397
3398  InstructionCost getVectorSplitCost() const { return 1; }
3399
3400 /// @}
3401};
3402
3403/// Concrete BasicTTIImpl that can be used if no further customization
3404/// is needed.
3405class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
3406 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
3407
3408 friend class BasicTTIImplBase<BasicTTIImpl>;
3409
3410 const TargetSubtargetInfo *ST;
3411 const TargetLoweringBase *TLI;
3412
3413 const TargetSubtargetInfo *getST() const { return ST; }
3414 const TargetLoweringBase *getTLI() const { return TLI; }
3415
3416public:
3417 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
3418};
3419
3420} // end namespace llvm
3421
3422#endif // LLVM_CODEGEN_BASICTTIIMPL_H