BasicTTIImpl.h
1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
54#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <limits>
58#include <optional>
59#include <utility>
60
61namespace llvm {
62
63class Function;
64class GlobalValue;
65class LLVMContext;
66class ScalarEvolution;
67class SCEV;
68class TargetMachine;
69
70extern cl::opt<unsigned> PartialUnrollingThreshold;
71
72/// Base class which can be used to help build a TTI implementation.
73///
74/// This class provides as much implementation of the TTI interface as is
75/// possible using the target independent parts of the code generator.
76///
77/// In order to subclass it, your class must implement a getST() method to
78/// return the subtarget, and a getTLI() method to return the target lowering.
79/// We need these methods implemented in the derived class so that this class
80/// doesn't have to duplicate storage for them.
81template <typename T>
82class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
83private:
84 using BaseT = TargetTransformInfoImplCRTPBase<T>;
85 using TTI = TargetTransformInfo;
86
87 /// Helper function to access this as a T.
88 const T *thisT() const { return static_cast<const T *>(this); }
89
90 /// Estimate a cost of Broadcast as an extract and sequence of insert
91 /// operations.
92 InstructionCost
93 getBroadcastShuffleOverhead(FixedVectorType *VTy,
94 TTI::TargetCostKind CostKind) const {
95 InstructionCost Cost = 0;
96 // Broadcast cost is equal to the cost of extracting the zero'th element
97 // plus the cost of inserting it into every element of the result vector.
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
99 CostKind, 0, nullptr, nullptr);
100
101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
103 CostKind, i, nullptr, nullptr);
104 }
105 return Cost;
106 }
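// Editorial note (not in the original source): as a rough illustration, for a
// <4 x float> broadcast on a target where each getVectorInstrCost query above
// returns 1, this helper reports 1 (extract lane 0) + 4 (insert into each of
// the four result lanes) = 5.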
107
108 /// Estimate a cost of shuffle as a sequence of extract and insert
109 /// operations.
110 InstructionCost
111 getPermuteShuffleOverhead(FixedVectorType *VTy,
112 TTI::TargetCostKind CostKind) const {
113 InstructionCost Cost = 0;
114 // Shuffle cost is equal to the cost of extracting elements from its argument
115 // plus the cost of inserting them into the result vector.
116
117 // e.g. <4 x float> has a mask of <0,5,2,7> i.e we need to extract from
118 // index 0 of first vector, index 1 of second vector,index 2 of first
119 // vector and finally index 3 of second vector and insert them at index
120 // <0,1,2,3> of result vector.
121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
123 CostKind, i, nullptr, nullptr);
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
125 CostKind, i, nullptr, nullptr);
126 }
127 return Cost;
128 }
129
130 /// Estimate a cost of subvector extraction as a sequence of extract and
131 /// insert operations.
132 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
133 TTI::TargetCostKind CostKind,
134 int Index,
135 FixedVectorType *SubVTy) const {
136 assert(VTy && SubVTy &&
137 "Can only extract subvectors from vectors");
138 int NumSubElts = SubVTy->getNumElements();
139 assert((!isa<FixedVectorType>(VTy) ||
140 (Index + NumSubElts) <=
141 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
142 "SK_ExtractSubvector index out of range");
143
144 InstructionCost Cost = 0;
145 // Subvector extraction cost is equal to the cost of extracting element from
146 // the source type plus the cost of inserting them into the result vector
147 // type.
148 for (int i = 0; i != NumSubElts; ++i) {
149 Cost +=
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
153 CostKind, i, nullptr, nullptr);
154 }
155 return Cost;
156 }
157
158 /// Estimate a cost of subvector insertion as a sequence of extract and
159 /// insert operations.
160 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
161 TTI::TargetCostKind CostKind,
162 int Index,
163 FixedVectorType *SubVTy) const {
164 assert(VTy && SubVTy &&
165 "Can only insert subvectors into vectors");
166 int NumSubElts = SubVTy->getNumElements();
167 assert((!isa<FixedVectorType>(VTy) ||
168 (Index + NumSubElts) <=
169 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
170 "SK_InsertSubvector index out of range");
171
172 InstructionCost Cost = 0;
173 // Subvector insertion cost is equal to the cost of extracting element from
174 // the source type plus the cost of inserting them into the result vector
175 // type.
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
178 CostKind, i, nullptr, nullptr);
179 Cost +=
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
182 }
183 return Cost;
184 }
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetSubtargetInfo *getST() const {
188 return static_cast<const T *>(this)->getST();
189 }
190
191 /// Local query method delegates up to T which *must* implement this!
192 const TargetLoweringBase *getTLI() const {
193 return static_cast<const T *>(this)->getTLI();
194 }
195
196 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
197 switch (M) {
198 case TTI::MIM_Unindexed:
199 return ISD::UNINDEXED;
200 case TTI::MIM_PreInc:
201 return ISD::PRE_INC;
202 case TTI::MIM_PreDec:
203 return ISD::PRE_DEC;
204 case TTI::MIM_PostInc:
205 return ISD::POST_INC;
206 case TTI::MIM_PostDec:
207 return ISD::POST_DEC;
208 }
209 llvm_unreachable("Unexpected MemIndexedMode");
210 }
211
212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
213 Align Alignment,
214 bool VariableMask,
215 bool IsGatherScatter,
216 TTI::TargetCostKind CostKind,
217 unsigned AddressSpace = 0) const {
218 // We cannot scalarize scalable vectors, so return Invalid.
219 if (isa<ScalableVectorType>(DataTy))
220 return InstructionCost::getInvalid();
221
222 auto *VT = cast<FixedVectorType>(DataTy);
223 unsigned VF = VT->getNumElements();
224
225 // Assume the target does not have support for gather/scatter operations
226 // and provide a rough estimate.
227 //
228 // First, compute the cost of the individual memory operations.
229 InstructionCost AddrExtractCost =
230 IsGatherScatter ? getScalarizationOverhead(
231 FixedVectorType::get(
232 PointerType::get(VT->getContext(), 0), VF),
233 /*Insert=*/false, /*Extract=*/true, CostKind)
234 : 0;
235
236 // The cost of the scalar loads/stores.
237 InstructionCost MemoryOpCost =
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
239 AddressSpace, CostKind);
240
241 // Next, compute the cost of packing the result in a vector.
242 InstructionCost PackingCost =
243 getScalarizationOverhead(VT, Opcode != Instruction::Store,
244 Opcode == Instruction::Store, CostKind);
245
246 InstructionCost ConditionalCost = 0;
247 if (VariableMask) {
248 // Compute the cost of conditionally executing the memory operations with
249 // variable masks. This includes extracting the individual conditions,
250 // branches, and PHIs to combine the results.
251 // NOTE: Estimating the cost of conditionally executing the memory
252 // operations accurately is quite difficult and the current solution
253 // provides a very rough estimate only.
254 ConditionalCost =
255 getScalarizationOverhead(
256 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()), VF),
257 /*Insert=*/false, /*Extract=*/true, CostKind) +
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
260 }
261
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
263 }
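// Editorial note (not in the original source): a rough worked example, assuming
// every hook above returns 1. A gather of <4 x i32> with a variable mask then
// costs 4 (extract the 4 addresses) + 4 (scalar loads) + 4 (pack the results)
// + 4 + 4 * 2 (extract the 4 mask bits plus a branch and PHI per lane) = 24.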
264
265 /// Checks if the provided mask \p Mask is a splat mask, i.e. it contains only -1
266 /// or the same non -1 index value, and this index value is contained at least twice.
267 /// So, mask <0, -1,-1, -1> is not considered splat (it is just identity),
268 /// same for <-1, 0, -1, -1> (just a slide), while <2, -1, 2, -1> is a splat
269 /// with \p Index=2.
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
271 // Check that the broadcast index appears at least twice.
272 bool IsCompared = false;
273 if (int SplatIdx = PoisonMaskElem;
274 all_of(enumerate(Mask), [&](const auto &P) {
275 if (P.value() == PoisonMaskElem)
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
278 return false;
279 if (SplatIdx == PoisonMaskElem) {
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
282 }
283 IsCompared = true;
284 return SplatIdx == P.value();
285 })) {
286 Index = SplatIdx;
287 return true;
288 }
289 return false;
290 }
291
292 /// Several intrinsics that return structs (including llvm.sincos[pi] and
293 /// llvm.modf) can be lowered to a vector library call (for certain VFs). The
294 /// vector library functions correspond to the scalar calls (e.g. sincos or
295 /// modf), which, unlike the intrinsic, return values via output pointers. This
296 /// helper checks if a vector call exists for the given intrinsic, and returns
297 /// the cost, which includes the cost of the mask (if required), and the loads
298 /// for values returned via output pointers. \p LC is the scalar libcall and
299 /// \p CallRetElementIndex (optional) is the struct element which is mapped to
300 /// the call return value. If std::nullopt is returned, then no vector library
301 /// call is available, so the intrinsic should be assigned the default cost
302 /// (e.g. scalarization).
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
304 const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind,
305 std::optional<unsigned> CallRetElementIndex = {}) const {
306 Type *RetTy = ICA.getReturnType();
307 // Vector variants of the intrinsic can be mapped to a vector library call.
308 auto const *LibInfo = ICA.getLibInfo();
309 if (!LibInfo || !isa<StructType>(RetTy) ||
310 !isVectorizedStructTy(cast<StructType>(RetTy)))
311 return std::nullopt;
312
313 Type *Ty = getContainedTypes(RetTy).front();
314 EVT VT = getTLI()->getValueType(DL, Ty);
315
316 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
317
318 switch (ICA.getID()) {
319 case Intrinsic::modf:
320 LC = RTLIB::getMODF(VT);
321 break;
322 case Intrinsic::sincospi:
323 LC = RTLIB::getSINCOSPI(VT);
324 break;
325 case Intrinsic::sincos:
326 LC = RTLIB::getSINCOS(VT);
327 break;
328 default:
329 return std::nullopt;
330 }
331
332 // Find associated libcall.
333 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
334 if (LibcallImpl == RTLIB::Unsupported)
335 return std::nullopt;
336
337 LLVMContext &Ctx = RetTy->getContext();
338
339 // Cost the call + mask.
340 auto Cost =
341 thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
342
345 auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
346 Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
347 VecTy, {}, CostKind, 0, nullptr, {});
348 }
349
350 // Lowering to a library call (with output pointers) may require us to emit
351 // reloads for the results.
352 for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
353 if (Idx == CallRetElementIndex)
354 continue;
355 Cost += thisT()->getMemoryOpCost(
356 Instruction::Load, VectorTy,
357 thisT()->getDataLayout().getABITypeAlign(VectorTy), 0, CostKind);
358 }
359 return Cost;
360 }
361
362 /// Filter out constant and duplicated entries in \p Ops and return a vector
363 /// containing the types from \p Tys corresponding to the remaining operands.
364 static SmallVector<Type *, 4>
365 filterConstantAndDuplicatedOperands(ArrayRef<const Value *> Ops,
366 ArrayRef<Type *> Tys) {
367 SmallPtrSet<const Value *, 4> UniqueOperands;
368 SmallVector<Type *, 4> FilteredTys;
369 for (const auto &[Op, Ty] : zip_equal(Ops, Tys)) {
370 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
371 continue;
372 FilteredTys.push_back(Ty);
373 }
374 return FilteredTys;
375 }
376
377protected:
378 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
379 : BaseT(DL) {}
380 ~BasicTTIImplBase() override = default;
381
382 using TargetTransformInfoImplBase::DL;
383
384public:
385 /// \name Scalar TTI Implementations
386 /// @{
387 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
388 unsigned AddressSpace, Align Alignment,
389 unsigned *Fast) const override {
390 EVT E = EVT::getIntegerVT(Context, BitWidth);
391 return getTLI()->allowsMisalignedMemoryAccesses(
392 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
393 }
394
395 bool areInlineCompatible(const Function *Caller,
396 const Function *Callee) const override {
397 const TargetMachine &TM = getTLI()->getTargetMachine();
398
399 const FeatureBitset &CallerBits =
400 TM.getSubtargetImpl(*Caller)->getFeatureBits();
401 const FeatureBitset &CalleeBits =
402 TM.getSubtargetImpl(*Callee)->getFeatureBits();
403
404 // Inline a callee if its target-features are a subset of the callers
405 // target-features.
406 return (CallerBits & CalleeBits) == CalleeBits;
407 }
408
409 bool hasBranchDivergence(const Function *F = nullptr) const override {
410 return false;
411 }
412
413 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
414 return false;
415 }
416
417 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
418 return true;
419 }
420
421 unsigned getFlatAddressSpace() const override {
422 // Return an invalid address space.
423 return -1;
424 }
425
426 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
427 Intrinsic::ID IID) const override {
428 return false;
429 }
430
431 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
432 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
433 }
434
435 unsigned getAssumedAddrSpace(const Value *V) const override {
436 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
437 }
438
439 bool isSingleThreaded() const override {
440 return getTLI()->getTargetMachine().Options.ThreadModel ==
441 ThreadModel::Single;
442 }
443
444 std::pair<const Value *, unsigned>
445 getPredicatedAddrSpace(const Value *V) const override {
446 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
447 }
448
449 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
450 Value *NewV) const override {
451 return nullptr;
452 }
453
454 bool isLegalAddImmediate(int64_t imm) const override {
455 return getTLI()->isLegalAddImmediate(imm);
456 }
457
458 bool isLegalAddScalableImmediate(int64_t Imm) const override {
459 return getTLI()->isLegalAddScalableImmediate(Imm);
460 }
461
462 bool isLegalICmpImmediate(int64_t imm) const override {
463 return getTLI()->isLegalICmpImmediate(imm);
464 }
465
466 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
467 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
468 Instruction *I = nullptr,
469 int64_t ScalableOffset = 0) const override {
470 TargetLoweringBase::AddrMode AM;
471 AM.BaseGV = BaseGV;
472 AM.BaseOffs = BaseOffset;
473 AM.HasBaseReg = HasBaseReg;
474 AM.Scale = Scale;
475 AM.ScalableOffset = ScalableOffset;
476 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
477 }
478
479 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
480 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
481 }
482
483 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
484 Type *ScalarValTy) const override {
485 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
486 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
487 EVT VT = getTLI()->getValueType(DL, SrcTy);
488 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
489 getTLI()->isOperationCustom(ISD::STORE, VT))
490 return true;
491
492 EVT ValVT =
493 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
494 EVT LegalizedVT =
495 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
496 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
497 };
498 while (VF > 2 && IsSupportedByTarget(VF))
499 VF /= 2;
500 return VF;
501 }
502
503 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
504 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
505 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
506 }
507
508 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
509 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
510 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
511 }
512
513 bool isLSRCostLess(const TTI::LSRCost &C1,
514 const TTI::LSRCost &C2) const override {
515 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
516 }
517
521
525
529
530 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
531 StackOffset BaseOffset, bool HasBaseReg,
532 int64_t Scale,
533 unsigned AddrSpace) const override {
534 TargetLoweringBase::AddrMode AM;
535 AM.BaseGV = BaseGV;
536 AM.BaseOffs = BaseOffset.getFixed();
537 AM.HasBaseReg = HasBaseReg;
538 AM.Scale = Scale;
539 AM.ScalableOffset = BaseOffset.getScalable();
540 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
541 return 0;
542 return InstructionCost::getInvalid();
543 }
544
545 bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
546 return getTLI()->isTruncateFree(Ty1, Ty2);
547 }
548
549 bool isProfitableToHoist(Instruction *I) const override {
550 return getTLI()->isProfitableToHoist(I);
551 }
552
553 bool useAA() const override { return getST()->useAA(); }
554
555 bool isTypeLegal(Type *Ty) const override {
556 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
557 return getTLI()->isTypeLegal(VT);
558 }
559
560 unsigned getRegUsageForType(Type *Ty) const override {
561 EVT ETy = getTLI()->getValueType(DL, Ty);
562 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
563 }
564
565 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
566 ArrayRef<const Value *> Operands, Type *AccessType,
567 TTI::TargetCostKind CostKind) const override {
568 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
569 }
570
571 unsigned getEstimatedNumberOfCaseClusters(
572 const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI,
573 BlockFrequencyInfo *BFI) const override {
574 /// Try to find the estimated number of clusters. Note that the number of
575 /// clusters identified in this function could be different from the actual
576 /// numbers found in lowering. This function ignores switches that are
577 /// lowered with a mix of jump table / bit test / BTree. This function was
578 /// initially intended to be used when estimating the cost of switch in
579 /// inline cost heuristic, but it's a generic cost model to be used in other
580 /// places (e.g., in loop unrolling).
581 unsigned N = SI.getNumCases();
582 const TargetLoweringBase *TLI = getTLI();
583 const DataLayout &DL = this->getDataLayout();
584
585 JumpTableSize = 0;
586 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
587
588 // Early exit if both a jump table and bit test are not allowed.
589 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
590 return N;
591
592 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
593 APInt MinCaseVal = MaxCaseVal;
594 for (auto CI : SI.cases()) {
595 const APInt &CaseVal = CI.getCaseValue()->getValue();
596 if (CaseVal.sgt(MaxCaseVal))
597 MaxCaseVal = CaseVal;
598 if (CaseVal.slt(MinCaseVal))
599 MinCaseVal = CaseVal;
600 }
601
602 // Check if suitable for a bit test
603 if (N <= DL.getIndexSizeInBits(0u)) {
604 SmallDenseMap<const BasicBlock *, unsigned> DestMap;
605 for (auto I : SI.cases()) {
606 const BasicBlock *BB = I.getCaseSuccessor();
607 ++DestMap[BB];
608 }
609
610 if (TLI->isSuitableForBitTests(DestMap, MinCaseVal, MaxCaseVal, DL))
611 return 1;
612 }
613
614 // Check if suitable for a jump table.
615 if (IsJTAllowed) {
616 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
617 return N;
618 uint64_t Range =
619 (MaxCaseVal - MinCaseVal)
620 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
621 // Check whether a range of clusters is dense enough for a jump table
622 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
623 JumpTableSize = Range;
624 return 1;
625 }
626 }
627 return N;
628 }
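// Editorial note (not in the original source): as a worked example, a switch
// with cases {0, 2, 3, 9} has MinCaseVal = 0, MaxCaseVal = 9 and therefore
// Range = 10. If jump tables are allowed and the target considers a 4-case,
// 10-slot table dense enough, this returns 1 and sets JumpTableSize = 10;
// otherwise it falls back to returning N = 4.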
629
630 bool shouldBuildLookupTables() const override {
631 const TargetLoweringBase *TLI = getTLI();
632 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
633 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
634 }
635
636 bool shouldBuildRelLookupTables() const override {
637 const TargetMachine &TM = getTLI()->getTargetMachine();
638 // If non-PIC mode, do not generate a relative lookup table.
639 if (!TM.isPositionIndependent())
640 return false;
641
642 /// Relative lookup table entries consist of 32-bit offsets.
643 /// Do not generate relative lookup tables for large code models
644 /// in 64-bit architectures where 32-bit offsets might not be enough.
645 if (TM.getCodeModel() == CodeModel::Medium ||
646 TM.getCodeModel() == CodeModel::Large)
647 return false;
648
649 const Triple &TargetTriple = TM.getTargetTriple();
650 if (!TargetTriple.isArch64Bit())
651 return false;
652
653 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
654 // there.
655 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
656 return false;
657
658 return true;
659 }
660
661 bool haveFastSqrt(Type *Ty) const override {
662 const TargetLoweringBase *TLI = getTLI();
663 EVT VT = TLI->getValueType(DL, Ty);
664 return TLI->isTypeLegal(VT) &&
665 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
666 }
667
668 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override { return true; }
669
670 InstructionCost getFPOpCost(Type *Ty) const override {
671 // Check whether FADD is available, as a proxy for floating-point in
672 // general.
673 const TargetLoweringBase *TLI = getTLI();
674 EVT VT = TLI->getValueType(DL, Ty);
675 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
676 return TargetTransformInfo::TCC_Basic;
677 return TargetTransformInfo::TCC_Expensive;
678 }
679
680 bool preferToKeepConstantsAttached(const Instruction &Inst,
681 const Function &Fn) const override {
682 switch (Inst.getOpcode()) {
683 default:
684 break;
685 case Instruction::SDiv:
686 case Instruction::SRem:
687 case Instruction::UDiv:
688 case Instruction::URem: {
689 if (!isa<ConstantInt>(Inst.getOperand(1)))
690 return false;
691 EVT VT = getTLI()->getValueType(DL, Inst.getType());
692 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
693 }
694 };
695
696 return false;
697 }
698
699 unsigned getInliningThresholdMultiplier() const override { return 1; }
700 unsigned adjustInliningThreshold(const CallBase *CB) const override {
701 return 0;
702 }
703 unsigned getCallerAllocaCost(const CallBase *CB,
704 const AllocaInst *AI) const override {
705 return 0;
706 }
707
708 int getInlinerVectorBonusPercent() const override { return 150; }
709
710 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
711 TTI::UnrollingPreferences &UP,
712 OptimizationRemarkEmitter *ORE) const override {
713 // This unrolling functionality is target independent, but to provide some
714 // motivation for its intended use, for x86:
715
716 // According to the Intel 64 and IA-32 Architectures Optimization Reference
717 // Manual, Intel Core models and later have a loop stream detector (and
718 // associated uop queue) that can benefit from partial unrolling.
719 // The relevant requirements are:
720 // - The loop must have no more than 4 (8 for Nehalem and later) branches
721 // taken, and none of them may be calls.
722 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
723
724 // According to the Software Optimization Guide for AMD Family 15h
725 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
726 // and loop buffer which can benefit from partial unrolling.
727 // The relevant requirements are:
728 // - The loop must have fewer than 16 branches
729 // - The loop must have less than 40 uops in all executed loop branches
730
731 // The number of taken branches in a loop is hard to estimate here, and
732 // benchmarking has revealed that it is better not to be conservative when
733 // estimating the branch count. As a result, we'll ignore the branch limits
734 // until someone finds a case where it matters in practice.
735
736 unsigned MaxOps;
737 const TargetSubtargetInfo *ST = getST();
738 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
739 MaxOps = PartialUnrollingThreshold;
740 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
741 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
742 else
743 return;
744
745 // Scan the loop: don't unroll loops with calls.
746 for (BasicBlock *BB : L->blocks()) {
747 for (Instruction &I : *BB) {
748 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
749 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
750 if (!thisT()->isLoweredToCall(F))
751 continue;
752 }
753
754 if (ORE) {
755 ORE->emit([&]() {
756 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
757 L->getHeader())
758 << "advising against unrolling the loop because it "
759 "contains a "
760 << ore::NV("Call", &I);
761 });
762 }
763 return;
764 }
765 }
766 }
767
768 // Enable runtime and partial unrolling up to the specified size.
769 // Enable using trip count upper bound to unroll loops.
770 UP.Partial = UP.Runtime = UP.UpperBound = true;
771 UP.PartialThreshold = MaxOps;
772
773 // Avoid unrolling when optimizing for size.
774 UP.OptSizeThreshold = 0;
775 UP.PartialOptSizeThreshold = 0;
776
777 // Set number of instructions optimized when "back edge"
778 // becomes "fall through" to default value of 2.
779 UP.BEInsns = 2;
780 }
781
782 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
783 TTI::PeelingPreferences &PP) const override {
784 PP.PeelCount = 0;
785 PP.AllowPeeling = true;
786 PP.AllowLoopNestsPeeling = false;
787 PP.PeelProfiledIterations = true;
788 }
789
790 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
791 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
792 HardwareLoopInfo &HWLoopInfo) const override {
793 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
794 }
795
796 unsigned getEpilogueVectorizationMinVF() const override {
797 return BaseT::getEpilogueVectorizationMinVF();
798 }
799
800 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
801 return BaseT::preferPredicateOverEpilogue(TFI);
802 }
803
804 TailFoldingStyle
805 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override {
806 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
807 }
808
809 std::optional<Instruction *>
810 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
811 return BaseT::instCombineIntrinsic(IC, II);
812 }
813
814 std::optional<Value *>
815 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
816 APInt DemandedMask, KnownBits &Known,
817 bool &KnownBitsComputed) const override {
818 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
819 KnownBitsComputed);
820 }
821
822 std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
823 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
824 APInt &UndefElts2, APInt &UndefElts3,
825 std::function<void(Instruction *, unsigned, APInt, APInt &)>
826 SimplifyAndSetOp) const override {
827 return BaseT::simplifyDemandedVectorEltsIntrinsic(
828 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
829 SimplifyAndSetOp);
830 }
831
832 std::optional<unsigned>
833 getCacheSize(TargetTransformInfo::CacheLevel Level) const override {
834 return std::optional<unsigned>(
835 getST()->getCacheSize(static_cast<unsigned>(Level)));
836 }
837
838 std::optional<unsigned>
839 getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override {
840 std::optional<unsigned> TargetResult =
841 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
842
843 if (TargetResult)
844 return TargetResult;
845
846 return BaseT::getCacheAssociativity(Level);
847 }
848
849 unsigned getCacheLineSize() const override {
850 return getST()->getCacheLineSize();
851 }
852
853 unsigned getPrefetchDistance() const override {
854 return getST()->getPrefetchDistance();
855 }
856
857 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
858 unsigned NumStridedMemAccesses,
859 unsigned NumPrefetches,
860 bool HasCall) const override {
861 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
862 NumPrefetches, HasCall);
863 }
864
865 unsigned getMaxPrefetchIterationsAhead() const override {
866 return getST()->getMaxPrefetchIterationsAhead();
867 }
868
869 bool enableWritePrefetching() const override {
870 return getST()->enableWritePrefetching();
871 }
872
873 bool shouldPrefetchAddressSpace(unsigned AS) const override {
874 return getST()->shouldPrefetchAddressSpace(AS);
875 }
876
877 /// @}
878
879 /// \name Vector TTI Implementations
880 /// @{
881
882 TypeSize
883 getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override {
884 return TypeSize::getFixed(32);
885 }
886
887 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
888 std::optional<unsigned> getVScaleForTuning() const override {
889 return std::nullopt;
890 }
891 bool isVScaleKnownToBeAPowerOfTwo() const override { return false; }
892
893 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
894 /// are set if the demanded result elements need to be inserted and/or
895 /// extracted from vectors.
896 InstructionCost getScalarizationOverhead(
897 VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
898 TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
899 ArrayRef<Value *> VL = {}) const override {
900 /// FIXME: a bitfield is not a reasonable abstraction for talking about
901 /// which elements are needed from a scalable vector
902 if (isa<ScalableVectorType>(InTy))
903 return InstructionCost::getInvalid();
904 auto *Ty = cast<FixedVectorType>(InTy);
905
906 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
907 (VL.empty() || VL.size() == Ty->getNumElements()) &&
908 "Vector size mismatch");
909
910 InstructionCost Cost = 0;
911
912 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
913 if (!DemandedElts[i])
914 continue;
915 if (Insert) {
916 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
917 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
918 CostKind, i, nullptr, InsertedVal);
919 }
920 if (Extract)
921 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
922 CostKind, i, nullptr, nullptr);
923 }
924
925 return Cost;
926 }
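// Editorial note (not in the original source): for example, a <4 x i32> with
// DemandedElts = 0b0101 and both Insert and Extract set only counts lanes 0
// and 2, i.e. two insert costs plus two extract costs from the loop above.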
927
928 bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override {
929 return false;
930 }
931
932 bool
933 isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
934 unsigned ScalarOpdIdx) const override {
935 return false;
936 }
937
938 bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
939 int OpdIdx) const override {
940 return OpdIdx == -1;
941 }
942
943 bool
944 isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
945 int RetIdx) const override {
946 return RetIdx == 0;
947 }
948
949 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
950 InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
951 bool Extract,
952 TTI::TargetCostKind CostKind) const {
953 if (isa<ScalableVectorType>(InTy))
954 return InstructionCost::getInvalid();
955 auto *Ty = cast<FixedVectorType>(InTy);
956
957 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
958 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
959 CostKind);
960 }
961
962 /// Estimate the overhead of scalarizing an instruction's
963 /// operands. The (potentially vector) types to use for each of the
964 /// arguments are passed via Tys.
965 InstructionCost getOperandsScalarizationOverhead(
966 ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind) const override {
967 InstructionCost Cost = 0;
968 for (Type *Ty : Tys) {
969 // Disregard things like metadata arguments.
970 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
971 !Ty->isPtrOrPtrVectorTy())
972 continue;
973
974 if (auto *VecTy = dyn_cast<VectorType>(Ty))
975 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
976 /*Extract*/ true, CostKind);
977 }
978
979 return Cost;
980 }
981
982 /// Estimate the overhead of scalarizing the inputs and outputs of an
983 /// instruction, with return type RetTy and arguments Args of type Tys. If
984 /// Args are unknown (empty), then the cost associated with one argument is
985 /// added as a heuristic.
986 InstructionCost
987 getScalarizationOverhead(VectorType *RetTy, ArrayRef<const Value *> Args,
988 ArrayRef<Type *> Tys,
989 TTI::TargetCostKind CostKind) const {
990 InstructionCost Cost = getScalarizationOverhead(
991 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
992 if (!Args.empty())
993 Cost += getOperandsScalarizationOverhead(
994 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
995 else
996 // When no information on arguments is provided, we add the cost
997 // associated with one argument as a heuristic.
998 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
999 /*Extract*/ true, CostKind);
1000
1001 return Cost;
1002 }
1003
1004 /// Estimate the cost of type-legalization and the legalized type.
1005 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
1006 LLVMContext &C = Ty->getContext();
1007 EVT MTy = getTLI()->getValueType(DL, Ty);
1008
1009 InstructionCost Cost = 1;
1010 // We keep legalizing the type until we find a legal kind. We assume that
1011 // the only operation that costs anything is the split. After splitting
1012 // we need to handle two types.
1013 while (true) {
1014 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
1015
1015
1016 if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
1017 // Ensure we return a sensible simple VT here, since many callers of
1018 // this function require it.
1019 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
1020 return std::make_pair(InstructionCost::getInvalid(), VT);
1021 }
1022
1023 if (LK.first == TargetLoweringBase::TypeLegal)
1024 return std::make_pair(Cost, MTy.getSimpleVT());
1025
1026 if (LK.first == TargetLoweringBase::TypeSplitVector ||
1027 LK.first == TargetLoweringBase::TypeExpandInteger)
1028 Cost *= 2;
1029
1030 // Do not loop with f128 type.
1031 if (MTy == LK.second)
1032 return std::make_pair(Cost, MTy.getSimpleVT());
1033
1034 // Keep legalizing the type.
1035 MTy = LK.second;
1036 }
1037 }
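// Editorial note (not in the original source): an illustrative trace, assuming
// a hypothetical target whose widest legal vector is v4i32. Legalizing
// <16 x i32> splits twice, doubling Cost each time, so the function returns
// {Cost = 4, MVT::v4i32}:
//   <16 x i32> -> 2 x <8 x i32>  (Cost = 2)
//   <8 x i32>  -> 2 x <4 x i32>  (Cost = 4)
//   <4 x i32>  is legal, so the loop exits.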
1038
1039 unsigned getMaxInterleaveFactor(ElementCount VF) const override { return 1; }
1040
1041 InstructionCost getArithmeticInstrCost(
1042 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1043 TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
1044 TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
1045 ArrayRef<const Value *> Args = {},
1046 const Instruction *CxtI = nullptr) const override {
1047 // Check if any of the operands are vector operands.
1048 const TargetLoweringBase *TLI = getTLI();
1049 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1050 assert(ISD && "Invalid opcode");
1051
1052 // TODO: Handle more cost kinds.
1053 if (CostKind != TTI::TCK_RecipThroughput)
1054 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1055 Opd1Info, Opd2Info,
1056 Args, CxtI);
1057
1058 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1059
1060 bool IsFloat = Ty->isFPOrFPVectorTy();
1061 // Assume that floating point arithmetic operations cost twice as much as
1062 // integer operations.
1063 InstructionCost OpCost = (IsFloat ? 2 : 1);
1064
1065 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1066 // The operation is legal. Assume it costs 1.
1067 // TODO: Once we have extract/insert subvector cost we need to use them.
1068 return LT.first * OpCost;
1069 }
1070
1071 if (!TLI->isOperationExpand(ISD, LT.second)) {
1072 // If the operation is custom lowered, then assume that the code is twice
1073 // as expensive.
1074 return LT.first * 2 * OpCost;
1075 }
1076
1077 // An 'Expand' of URem and SRem is special because it may default
1078 // to expanding the operation into a sequence of sub-operations
1079 // i.e. X % Y -> X-(X/Y)*Y.
1080 if (ISD == ISD::UREM || ISD == ISD::SREM) {
1081 bool IsSigned = ISD == ISD::SREM;
1082 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
1083 LT.second) ||
1084 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
1085 LT.second)) {
1086 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1087 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
1088 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1089 InstructionCost MulCost =
1090 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1091 InstructionCost SubCost =
1092 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1093 return DivCost + MulCost + SubCost;
1094 }
1095 }
1096
1097 // We cannot scalarize scalable vectors, so return Invalid.
1098 if (isa<ScalableVectorType>(Ty))
1099 return InstructionCost::getInvalid();
1100
1101 // Else, assume that we need to scalarize this op.
1102 // TODO: If one of the types get legalized by splitting, handle this
1103 // similarly to what getCastInstrCost() does.
1104 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1105 InstructionCost Cost = thisT()->getArithmeticInstrCost(
1106 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
1107 Args, CxtI);
1108 // Return the cost of multiple scalar invocation plus the cost of
1109 // inserting and extracting the values.
1110 SmallVector<Type *> Tys(Args.size(), Ty);
1111 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1112 VTy->getNumElements() * Cost;
1113 }
1114
1115 // We don't know anything about this scalar instruction.
1116 return OpCost;
1117 }
1118
1119 TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
1120 ArrayRef<int> Mask,
1121 VectorType *SrcTy, int &Index,
1122 VectorType *&SubTy) const {
1123 if (Mask.empty())
1124 return Kind;
1125 int NumDstElts = Mask.size();
1126 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1127 switch (Kind) {
1128 case TTI::SK_PermuteSingleSrc: {
1129 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
1130 return TTI::SK_Reverse;
1131 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
1132 return TTI::SK_Broadcast;
1133 if (isSplatMask(Mask, NumSrcElts, Index))
1134 return TTI::SK_Broadcast;
1135 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
1136 (Index + NumDstElts) <= NumSrcElts) {
1137 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
1138 return TTI::SK_ExtractSubvector;
1139 }
1140 break;
1141 }
1142 case TTI::SK_PermuteTwoSrc: {
1143 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1144 return improveShuffleKindFromMask(TTI::SK_PermuteSingleSrc, Mask, SrcTy,
1145 Index, SubTy);
1146 int NumSubElts;
1147 if (NumDstElts > 2 && ShuffleVectorInst::isInsertSubvectorMask(
1148 Mask, NumSrcElts, NumSubElts, Index)) {
1149 if (Index + NumSubElts > NumSrcElts)
1150 return Kind;
1151 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
1152 return TTI::SK_InsertSubvector;
1153 }
1154 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
1155 return TTI::SK_Select;
1156 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1157 return TTI::SK_Transpose;
1158 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1159 return TTI::SK_Splice;
1160 break;
1161 }
1162 case TTI::SK_Select:
1163 case TTI::SK_Reverse:
1164 case TTI::SK_Broadcast:
1165 case TTI::SK_Transpose:
1166 case TTI::SK_InsertSubvector:
1167 case TTI::SK_ExtractSubvector:
1168 case TTI::SK_Splice:
1169 break;
1170 }
1171 return Kind;
1172 }
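// Editorial note (not in the original source): example classifications. With a
// single <4 x i32> source, the mask <3,2,1,0> is refined to TTI::SK_Reverse and
// <2,-1,2,-1> to TTI::SK_Broadcast (via isSplatMask, which sets Index = 2),
// while an interleaving two-source mask such as <0,4,1,5> matches none of the
// special patterns and keeps TTI::SK_PermuteTwoSrc.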
1173
1174 InstructionCost
1175 getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
1176 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
1177 VectorType *SubTp, ArrayRef<const Value *> Args = {},
1178 const Instruction *CxtI = nullptr) const override {
1179 switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
1180 case TTI::SK_Broadcast:
1181 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1182 return getBroadcastShuffleOverhead(FVT, CostKind);
1183 return InstructionCost::getInvalid();
1184 case TTI::SK_Select:
1185 case TTI::SK_Splice:
1186 case TTI::SK_Reverse:
1187 case TTI::SK_Transpose:
1188 case TTI::SK_PermuteSingleSrc:
1189 case TTI::SK_PermuteTwoSrc:
1190 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1191 return getPermuteShuffleOverhead(FVT, CostKind);
1192 return InstructionCost::getInvalid();
1193 case TTI::SK_ExtractSubvector:
1194 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1195 cast<FixedVectorType>(SubTp));
1196 case TTI::SK_InsertSubvector:
1197 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1198 cast<FixedVectorType>(SubTp));
1199 }
1200 llvm_unreachable("Unknown TTI::ShuffleKind");
1201 }
1202
1203 InstructionCost
1204 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1205 TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
1206 const Instruction *I = nullptr) const override {
1207 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1208 return 0;
1209
1210 const TargetLoweringBase *TLI = getTLI();
1211 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1212 assert(ISD && "Invalid opcode");
1213 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1214 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1215
1216 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1217 TypeSize DstSize = DstLT.second.getSizeInBits();
1218 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1219 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1220
1221 switch (Opcode) {
1222 default:
1223 break;
1224 case Instruction::Trunc:
1225 // Check for NOOP conversions.
1226 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1227 return 0;
1228 [[fallthrough]];
1229 case Instruction::BitCast:
1230 // Bitcast between types that are legalized to the same type are free and
1231 // assume int to/from ptr of the same size is also free.
1232 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1233 SrcSize == DstSize)
1234 return 0;
1235 break;
1236 case Instruction::FPExt:
1237 if (I && getTLI()->isExtFree(I))
1238 return 0;
1239 break;
1240 case Instruction::ZExt:
1241 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1242 return 0;
1243 [[fallthrough]];
1244 case Instruction::SExt:
1245 if (I && getTLI()->isExtFree(I))
1246 return 0;
1247
1248 // If this is a zext/sext of a load, return 0 if the corresponding
1249 // extending load exists on target and the result type is legal.
1250 if (CCH == TTI::CastContextHint::Normal) {
1251 EVT ExtVT = EVT::getEVT(Dst);
1252 EVT LoadVT = EVT::getEVT(Src);
1253 unsigned LType =
1254 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1255 if (DstLT.first == SrcLT.first &&
1256 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1257 return 0;
1258 }
1259 break;
1260 case Instruction::AddrSpaceCast:
1261 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1262 Dst->getPointerAddressSpace()))
1263 return 0;
1264 break;
1265 }
1266
1267 auto *SrcVTy = dyn_cast<VectorType>(Src);
1268 auto *DstVTy = dyn_cast<VectorType>(Dst);
1269
1270 // If the cast is marked as legal (or promote) then assume low cost.
1271 if (SrcLT.first == DstLT.first &&
1272 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1273 return SrcLT.first;
1274
1275 // Handle scalar conversions.
1276 if (!SrcVTy && !DstVTy) {
1277 // Just check the op cost. If the operation is legal then assume it costs
1278 // 1.
1279 if (!TLI->isOperationExpand(ISD, DstLT.second))
1280 return 1;
1281
1282 // Assume that illegal scalar instruction are expensive.
1283 return 4;
1284 }
1285
1286 // Check vector-to-vector casts.
1287 if (DstVTy && SrcVTy) {
1288 // If the cast is between same-sized registers, then the check is simple.
1289 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1290
1291 // Assume that Zext is done using AND.
1292 if (Opcode == Instruction::ZExt)
1293 return SrcLT.first;
1294
1295 // Assume that sext is done using SHL and SRA.
1296 if (Opcode == Instruction::SExt)
1297 return SrcLT.first * 2;
1298
1299 // Just check the op cost. If the operation is legal then assume it
1300 // costs
1301 // 1 and multiply by the type-legalization overhead.
1302 if (!TLI->isOperationExpand(ISD, DstLT.second))
1303 return SrcLT.first * 1;
1304 }
1305
1306 // If we are legalizing by splitting, query the concrete TTI for the cost
1307 // of casting the original vector twice. We also need to factor in the
1308 // cost of the split itself. Count that as 1, to be consistent with
1309 // getTypeLegalizationCost().
1310 bool SplitSrc =
1311 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1312 TargetLowering::TypeSplitVector;
1313 bool SplitDst =
1314 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1315 TargetLowering::TypeSplitVector;
1316 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&
1317 DstVTy->getElementCount().isKnownEven()) {
1318 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1319 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1320 const T *TTI = thisT();
1321 // If both types need to be split then the split is free.
1322 InstructionCost SplitCost =
1323 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1324 return SplitCost +
1325 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1326 CostKind, I));
1327 }
1328
1329 // Scalarization cost is Invalid, can't assume any num elements.
1330 if (isa<ScalableVectorType>(DstVTy))
1331 return InstructionCost::getInvalid();
1332
1333 // In other cases where the source or destination are illegal, assume
1334 // the operation will get scalarized.
1335 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1336 InstructionCost Cost = thisT()->getCastInstrCost(
1337 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1338
1339 // Return the cost of multiple scalar invocation plus the cost of
1340 // inserting and extracting the values.
1341 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1342 CostKind) +
1343 Num * Cost;
1344 }
1345
1346 // We already handled vector-to-vector and scalar-to-scalar conversions.
1347 // This
1348 // is where we handle bitcast between vectors and scalars. We need to assume
1349 // that the conversion is scalarized in one way or another.
1350 if (Opcode == Instruction::BitCast) {
1351 // Illegal bitcasts are done by storing and loading from a stack slot.
1352 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1353 /*Extract*/ true, CostKind)
1354 : 0) +
1355 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1356 /*Extract*/ false, CostKind)
1357 : 0);
1358 }
1359
1360 llvm_unreachable("Unhandled cast");
1361 }
1362
1363 InstructionCost
1364 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1365 unsigned Index,
1366 TTI::TargetCostKind CostKind) const override {
1367 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1368 CostKind, Index, nullptr, nullptr) +
1369 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1370 TTI::CastContextHint::None, CostKind);
1371 }
1372
1373 InstructionCost
1374 getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
1375 const Instruction *I = nullptr) const override {
1376 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1377 }
1378
1379 InstructionCost getCmpSelInstrCost(
1380 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1381 TTI::TargetCostKind CostKind,
1382 TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
1383 TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
1384 const Instruction *I = nullptr) const override {
1385 const TargetLoweringBase *TLI = getTLI();
1386 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1387 assert(ISD && "Invalid opcode");
1388
1389 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)
1390 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1391 Op1Info, Op2Info, I);
1392
1393 // Selects on vectors are actually vector selects.
1394 if (ISD == ISD::SELECT) {
1395 assert(CondTy && "CondTy must exist");
1396 if (CondTy->isVectorTy())
1397 ISD = ISD::VSELECT;
1398 }
1399 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1400
1401 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1402 !TLI->isOperationExpand(ISD, LT.second)) {
1403 // The operation is legal. Assume it costs 1. Multiply
1404 // by the type-legalization overhead.
1405 return LT.first * 1;
1406 }
1407
1408 // Otherwise, assume that the cast is scalarized.
1409 // TODO: If one of the types get legalized by splitting, handle this
1410 // similarly to what getCastInstrCost() does.
1411 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1412 if (isa<ScalableVectorType>(ValTy))
1413 return InstructionCost::getInvalid();
1414
1415 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1416 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1417 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1418 CostKind, Op1Info, Op2Info, I);
1419
1420 // Return the cost of multiple scalar invocation plus the cost of
1421 // inserting and extracting the values.
1422 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1423 /*Extract*/ false, CostKind) +
1424 Num * Cost;
1425 }
1426
1427 // Unknown scalar opcode.
1428 return 1;
1429 }
1430
1431 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1432 TTI::TargetCostKind CostKind,
1433 unsigned Index, const Value *Op0,
1434 const Value *Op1) const override {
1435 return getRegUsageForType(Val->getScalarType());
1436 }
1437
1438 /// \param ScalarUserAndIdx encodes the information about extracts from a
1439 /// vector with 'Scalar' being the value being extracted,'User' being the user
1440 /// of the extract(nullptr if user is not known before vectorization) and
1441 /// 'Idx' being the extract lane.
1442 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1443 TTI::TargetCostKind CostKind,
1444 unsigned Index, Value *Scalar,
1445 ArrayRef<std::tuple<Value *, User *, int>>
1446 ScalarUserAndIdx) const override {
1447 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
1448 nullptr);
1449 }
1450
1451 InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
1452 TTI::TargetCostKind CostKind,
1453 unsigned Index) const override {
1454 Value *Op0 = nullptr;
1455 Value *Op1 = nullptr;
1456 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1457 Op0 = IE->getOperand(0);
1458 Op1 = IE->getOperand(1);
1459 }
1460 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1461 Op1);
1462 }
1463
1467 unsigned Index) const override {
1468 unsigned NewIndex = -1;
1469 if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
1470 assert(Index < FVTy->getNumElements() &&
1471 "Unexpected index from end of vector");
1472 NewIndex = FVTy->getNumElements() - 1 - Index;
1473 }
1474 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1475 nullptr);
1476 }
1477
1478 InstructionCost
1479 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
1480 const APInt &DemandedDstElts,
1481 TTI::TargetCostKind CostKind) const override {
1482 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1483 "Unexpected size of DemandedDstElts.");
1484
1485 InstructionCost Cost;
1486
1487 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1488 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1489
1490 // The Mask shuffling cost is extract all the elements of the Mask
1491 // and insert each of them Factor times into the wide vector:
1492 //
1493 // E.g. an interleaved group with factor 3:
1494 // %mask = icmp ult <8 x i32> %vec1, %vec2
1495 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1496 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1497 // The cost is estimated as extract all mask elements from the <8xi1> mask
1498 // vector and insert them factor times into the <24xi1> shuffled mask
1499 // vector.
1500 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1501 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1502 /*Insert*/ false,
1503 /*Extract*/ true, CostKind);
1504 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1505 /*Insert*/ true,
1506 /*Extract*/ false, CostKind);
1507
1508 return Cost;
1509 }
1510
1511 InstructionCost getMemoryOpCost(
1512 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1513 TTI::TargetCostKind CostKind,
1514 TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
1515 const Instruction *I = nullptr) const override {
1516 assert(!Src->isVoidTy() && "Invalid type");
1517 // Assume types, such as structs, are expensive.
1518 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1519 return 4;
1520 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1521
1522 // Assuming that all loads of legal types cost 1.
1523 InstructionCost Cost = LT.first;
1524 if (CostKind != TTI::TCK_RecipThroughput)
1525 return Cost;
1526
1527 const DataLayout &DL = this->getDataLayout();
1528 if (Src->isVectorTy() &&
1529 // In practice it's not currently possible to have a change in lane
1530 // length for extending loads or truncating stores so both types should
1531 // have the same scalable property.
1532 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1533 LT.second.getSizeInBits())) {
1534 // This is a vector load that legalizes to a larger type than the vector
1535 // itself. Unless the corresponding extending load or truncating store is
1536 // legal, then this will scalarize.
1537 TargetLowering::LegalizeAction LA = TargetLowering::Expand;
1538 EVT MemVT = getTLI()->getValueType(DL, Src);
1539 if (Opcode == Instruction::Store)
1540 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1541 else
1542 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1543
1544 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1545 // This is a vector load/store for some illegal type that is scalarized.
1546 // We must account for the cost of building or decomposing the vector.
1547 Cost += getScalarizationOverhead(
1548 cast<VectorType>(Src), Opcode != Instruction::Store,
1549 Opcode == Instruction::Store, CostKind);
1550 }
1551 }
1552
1553 return Cost;
1554 }
1555
1556 InstructionCost getInterleavedMemoryOpCost(
1557 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1558 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1559 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1560
1561 // We cannot scalarize scalable vectors, so return Invalid.
1562 if (isa<ScalableVectorType>(VecTy))
1563 return InstructionCost::getInvalid();
1564
1565 auto *VT = cast<FixedVectorType>(VecTy);
1566
1567 unsigned NumElts = VT->getNumElements();
1568 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1569
1570 unsigned NumSubElts = NumElts / Factor;
1571 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1572
1573 // Firstly, the cost of load/store operation.
1574 InstructionCost Cost;
1575 if (UseMaskForCond || UseMaskForGaps) {
1576 unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load
1577 : Intrinsic::masked_store;
1578 Cost = thisT()->getMemIntrinsicInstrCost(
1579 MemIntrinsicCostAttributes(IID, VecTy, Alignment, AddressSpace),
1580 CostKind);
1581 } else
1582 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1583 CostKind);
1584
1585 // Legalize the vector type, and get the legalized and unlegalized type
1586 // sizes.
1587 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1588 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1589 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1590
1591 // Scale the cost of the memory operation by the fraction of legalized
1592 // instructions that will actually be used. We shouldn't account for the
1593 // cost of dead instructions since they will be removed.
1594 //
1595 // E.g., An interleaved load of factor 8:
1596 // %vec = load <16 x i64>, <16 x i64>* %ptr
1597 // %v0 = shufflevector %vec, undef, <0, 8>
1598 //
1599 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1600 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1601 // type). The other loads are unused.
1602 //
1603 // TODO: Note that legalization can turn masked loads/stores into unmasked
1604 // (legalized) loads/stores. This can be reflected in the cost.
1605 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1606 // The number of loads of a legal type it will take to represent a load
1607 // of the unlegalized vector type.
1608 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1609
1610 // The number of elements of the unlegalized type that correspond to a
1611 // single legal instruction.
1612 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1613
1614 // Determine which legal instructions will be used.
1615 BitVector UsedInsts(NumLegalInsts, false);
1616 for (unsigned Index : Indices)
1617 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1618 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1619
1620 // Scale the cost of the load by the fraction of legal instructions that
1621 // will be used.
1622 Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
1623 }
1624
1625 // Then plus the cost of interleave operation.
1626 assert(Indices.size() <= Factor &&
1627 "Interleaved memory op has too many members");
1628
1629 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1630 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1631
1632 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1633 for (unsigned Index : Indices) {
1634 assert(Index < Factor && "Invalid index for interleaved memory op");
1635 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1636 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1637 }
1638
1639 if (Opcode == Instruction::Load) {
1640 // The interleave cost is similar to extract sub vectors' elements
1641 // from the wide vector, and insert them into sub vectors.
1642 //
1643 // E.g. An interleaved load of factor 2 (with one member of index 0):
1644 // %vec = load <8 x i32>, <8 x i32>* %ptr
1645 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1646 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1647 // <8 x i32> vector and insert them into a <4 x i32> vector.
1648 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1649 SubVT, DemandedAllSubElts,
1650 /*Insert*/ true, /*Extract*/ false, CostKind);
1651 Cost += Indices.size() * InsSubCost;
1652 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1653 /*Insert*/ false,
1654 /*Extract*/ true, CostKind);
1655 } else {
1656 // The interleave cost is extract elements from sub vectors, and
1657 // insert them into the wide vector.
1658 //
1659 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1660 // (using VF=4):
1661 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1662 // %gaps.mask = <true, true, false, true, true, false,
1663 // true, true, false, true, true, false>
1664 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1665 // i32 Align, <12 x i1> %gaps.mask
1666 // The cost is estimated as extract all elements (of actual members,
1667 // excluding gaps) from both <4 x i32> vectors and insert into the <12 x
1668 // i32> vector.
1669 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1670 SubVT, DemandedAllSubElts,
1671 /*Insert*/ false, /*Extract*/ true, CostKind);
1672 Cost += ExtSubCost * Indices.size();
1673 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1674 /*Insert*/ true,
1675 /*Extract*/ false, CostKind);
1676 }
1677
1678 if (!UseMaskForCond)
1679 return Cost;
1680
1681 Type *I8Type = Type::getInt8Ty(VT->getContext());
1682
1683 Cost += thisT()->getReplicationShuffleCost(
1684 I8Type, Factor, NumSubElts,
1685 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1686 CostKind);
1687
1688 // The Gaps mask is invariant and created outside the loop, therefore the
1689 // cost of creating it is not accounted for here. However if we have both
1690 // a MaskForGaps and some other mask that guards the execution of the
1691 // memory access, we need to account for the cost of And-ing the two masks
1692 // inside the loop.
1693 if (UseMaskForGaps) {
1694 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1695 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1696 CostKind);
1697 }
1698
1699 return Cost;
1700 }
1701
1702 /// Get intrinsic cost based on arguments.
1703 InstructionCost
1704 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1705 TTI::TargetCostKind CostKind) const override {
1706 // Check for generically free intrinsics.
1707 if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
1708 return 0;
1709
1710 // Assume that target intrinsics are cheap.
1711 Intrinsic::ID IID = ICA.getID();
1712 if (Function::isTargetIntrinsic(IID))
1713 return TargetTransformInfo::TCC_Basic;
1714
1715 // VP Intrinsics should have the same cost as their non-vp counterpart.
1716 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1717 // counterpart when the vector length argument is smaller than the maximum
1718 // vector length.
1719 // TODO: Support other kinds of VPIntrinsics
1720 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1721 std::optional<unsigned> FOp =
1723 if (FOp) {
1724 if (ICA.getID() == Intrinsic::vp_load) {
1725 Align Alignment;
1726 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1727 Alignment = VPI->getPointerAlignment().valueOrOne();
1728 unsigned AS = 0;
1729 if (ICA.getArgTypes().size() > 1)
1730 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
1731 AS = PtrTy->getAddressSpace();
1732 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1733 AS, CostKind);
1734 }
1735 if (ICA.getID() == Intrinsic::vp_store) {
1736 Align Alignment;
1737 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1738 Alignment = VPI->getPointerAlignment().valueOrOne();
1739 unsigned AS = 0;
1740 if (ICA.getArgTypes().size() >= 2)
1741 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
1742 AS = PtrTy->getAddressSpace();
1743 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1744 AS, CostKind);
1745 }
1747 ICA.getID() == Intrinsic::vp_fneg) {
1748 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1749 CostKind);
1750 }
1751 if (VPCastIntrinsic::isVPCast(ICA.getID())) {
1752 return thisT()->getCastInstrCost(
1753 *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
1755 }
1756 if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
1757 // We can only handle vp_cmp intrinsics with underlying instructions.
1758 if (ICA.getInst()) {
1759 assert(FOp);
1760 auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
1761 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1762 ICA.getReturnType(),
1763 UI->getPredicate(), CostKind);
1764 }
1765 }
1766 }
1767 if (ICA.getID() == Intrinsic::vp_load_ff) {
1768 Type *RetTy = ICA.getReturnType();
1769 Type *DataTy = cast<StructType>(RetTy)->getElementType(0);
1770 Align Alignment;
1771 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1772 Alignment = VPI->getPointerAlignment().valueOrOne();
1773 return thisT()->getMemIntrinsicInstrCost(
1774 MemIntrinsicCostAttributes(ICA.getID(), DataTy, Alignment),
1775 CostKind);
1776 }
1777 if (ICA.getID() == Intrinsic::vp_scatter) {
1778 if (ICA.isTypeBasedOnly()) {
1779 IntrinsicCostAttributes MaskedScatter(
1782 ICA.getFlags());
1783 return getTypeBasedIntrinsicInstrCost(MaskedScatter, CostKind);
1784 }
1785 Align Alignment;
1786 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1787 Alignment = VPI->getPointerAlignment().valueOrOne();
1788 bool VarMask = isa<Constant>(ICA.getArgs()[2]);
1789 return thisT()->getMemIntrinsicInstrCost(
1790 MemIntrinsicCostAttributes(Intrinsic::vp_scatter,
1791 ICA.getArgTypes()[0], ICA.getArgs()[1],
1792 VarMask, Alignment, nullptr),
1793 CostKind);
1794 }
1795 if (ICA.getID() == Intrinsic::vp_gather) {
1796 if (ICA.isTypeBasedOnly()) {
1797 IntrinsicCostAttributes MaskedGather(
1800 ICA.getFlags());
1801 return getTypeBasedIntrinsicInstrCost(MaskedGather, CostKind);
1802 }
1803 Align Alignment;
1804 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1805 Alignment = VPI->getPointerAlignment().valueOrOne();
1806 bool VarMask = isa<Constant>(ICA.getArgs()[1]);
1807 return thisT()->getMemIntrinsicInstrCost(
1808 MemIntrinsicCostAttributes(Intrinsic::vp_gather,
1809 ICA.getReturnType(), ICA.getArgs()[0],
1810 VarMask, Alignment, nullptr),
1811 CostKind);
1812 }
1813
1814 if (ICA.getID() == Intrinsic::vp_select ||
1815 ICA.getID() == Intrinsic::vp_merge) {
1816 TTI::OperandValueInfo OpInfoX, OpInfoY;
1817 if (!ICA.isTypeBasedOnly()) {
1818 OpInfoX = TTI::getOperandInfo(ICA.getArgs()[0]);
1819 OpInfoY = TTI::getOperandInfo(ICA.getArgs()[1]);
1820 }
1821 return getCmpSelInstrCost(
1822 Instruction::Select, ICA.getReturnType(), ICA.getArgTypes()[0],
1823 CmpInst::BAD_ICMP_PREDICATE, CostKind, OpInfoX, OpInfoY);
1824 }
1825
1826 std::optional<Intrinsic::ID> FID =
1828
1829 // Not functionally equivalent but close enough for cost modelling.
1830 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1831 FID = Intrinsic::vector_reverse;
1832
1833 if (FID) {
1834 // The non-vp version will have the same arg types except the mask and
1835 // vector length.
1836 assert(ICA.getArgTypes().size() >= 2 &&
1837 "Expected VPIntrinsic to have Mask and Vector Length args and "
1838 "types");
1839
1840 ArrayRef<const Value *> NewArgs = ArrayRef(ICA.getArgs());
1841 if (!ICA.isTypeBasedOnly())
1842 NewArgs = NewArgs.drop_back(2);
1844
1845 // VPReduction intrinsics have a start value argument that their non-vp
1846 // counterparts do not have, except for the fadd and fmul non-vp
1847 // counterparts.
1849 *FID != Intrinsic::vector_reduce_fadd &&
1850 *FID != Intrinsic::vector_reduce_fmul) {
1851 if (!ICA.isTypeBasedOnly())
1852 NewArgs = NewArgs.drop_front();
1853 NewTys = NewTys.drop_front();
1854 }
1855
1856 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewArgs,
1857 NewTys, ICA.getFlags());
1858 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1859 }
1860 }
1861
1862 if (ICA.isTypeBasedOnly())
1864
1865 Type *RetTy = ICA.getReturnType();
1866
1867 ElementCount RetVF = isVectorizedTy(RetTy) ? getVectorizedTypeVF(RetTy)
1869
1870 const IntrinsicInst *I = ICA.getInst();
1871 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1872 FastMathFlags FMF = ICA.getFlags();
1873 switch (IID) {
1874 default:
1875 break;
1876
1877 case Intrinsic::powi:
1878 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1879 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1880 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1881 ShouldOptForSize)) {
1882 // The cost is modeled on the expansion performed by ExpandPowI in
1883 // SelectionDAGBuilder.
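// (Worked example, illustrative only: for powi(x, 11), the exponent
// 11 = 0b1011 has ActiveBits = 4 and PopCount = 3, so the estimate is
// (4 + 3 - 2) = 5 fmuls; a negative exponent adds one fdiv for the final
// reciprocal.)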
1884 APInt Exponent = RHSC->getValue().abs();
1885 unsigned ActiveBits = Exponent.getActiveBits();
1886 unsigned PopCount = Exponent.popcount();
1887 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1888 thisT()->getArithmeticInstrCost(
1889 Instruction::FMul, RetTy, CostKind);
1890 if (RHSC->isNegative())
1891 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1892 CostKind);
1893 return Cost;
1894 }
1895 }
1896 break;
1897 case Intrinsic::cttz:
1898 // FIXME: If necessary, this should go in target-specific overrides.
1899 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1901 break;
1902
1903 case Intrinsic::ctlz:
1904 // FIXME: If necessary, this should go in target-specific overrides.
1905 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1907 break;
1908
1909 case Intrinsic::memcpy:
1910 return thisT()->getMemcpyCost(ICA.getInst());
1911
1912 case Intrinsic::masked_scatter: {
1913 const Value *Mask = Args[2];
1914 bool VarMask = !isa<Constant>(Mask);
1915 Align Alignment = I->getParamAlign(1).valueOrOne();
1916 return thisT()->getMemIntrinsicInstrCost(
1917 MemIntrinsicCostAttributes(Intrinsic::masked_scatter,
1918 ICA.getArgTypes()[0], Args[1], VarMask,
1919 Alignment, I),
1920 CostKind);
1921 }
1922 case Intrinsic::masked_gather: {
1923 const Value *Mask = Args[1];
1924 bool VarMask = !isa<Constant>(Mask);
1925 Align Alignment = I->getParamAlign(0).valueOrOne();
1926 return thisT()->getMemIntrinsicInstrCost(
1927 MemIntrinsicCostAttributes(Intrinsic::masked_gather, RetTy, Args[0],
1928 VarMask, Alignment, I),
1929 CostKind);
1930 }
1931 case Intrinsic::masked_compressstore: {
1932 const Value *Data = Args[0];
1933 const Value *Mask = Args[2];
1934 Align Alignment = I->getParamAlign(1).valueOrOne();
1935 return thisT()->getMemIntrinsicInstrCost(
1936 MemIntrinsicCostAttributes(IID, Data->getType(), !isa<Constant>(Mask),
1937 Alignment, I),
1938 CostKind);
1939 }
1940 case Intrinsic::masked_expandload: {
1941 const Value *Mask = Args[1];
1942 Align Alignment = I->getParamAlign(0).valueOrOne();
1943 return thisT()->getMemIntrinsicInstrCost(
1944 MemIntrinsicCostAttributes(IID, RetTy, !isa<Constant>(Mask),
1945 Alignment, I),
1946 CostKind);
1947 }
1948 case Intrinsic::experimental_vp_strided_store: {
1949 const Value *Data = Args[0];
1950 const Value *Ptr = Args[1];
1951 const Value *Mask = Args[3];
1952 const Value *EVL = Args[4];
1953 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1954 Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
1955 Align Alignment =
1956 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1957 return thisT()->getMemIntrinsicInstrCost(
1958 MemIntrinsicCostAttributes(IID, Data->getType(), Ptr, VarMask,
1959 Alignment, I),
1960 CostKind);
1961 }
1962 case Intrinsic::experimental_vp_strided_load: {
1963 const Value *Ptr = Args[0];
1964 const Value *Mask = Args[2];
1965 const Value *EVL = Args[3];
1966 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1967 Type *EltTy = cast<VectorType>(RetTy)->getElementType();
1968 Align Alignment =
1969 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
1970 return thisT()->getMemIntrinsicInstrCost(
1971 MemIntrinsicCostAttributes(IID, RetTy, Ptr, VarMask, Alignment, I),
1972 CostKind);
1973 }
1974 case Intrinsic::stepvector: {
1975 if (isa<ScalableVectorType>(RetTy))
1977 // The cost of materialising a constant integer vector.
1979 }
1980 case Intrinsic::vector_extract: {
1981 // FIXME: Handle case where a scalable vector is extracted from a scalable
1982 // vector
1983 if (isa<ScalableVectorType>(RetTy))
1985 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1986 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
1987 cast<VectorType>(RetTy),
1988 cast<VectorType>(Args[0]->getType()), {},
1989 CostKind, Index, cast<VectorType>(RetTy));
1990 }
1991 case Intrinsic::vector_insert: {
1992 // FIXME: Handle case where a scalable vector is inserted into a scalable
1993 // vector
1994 if (isa<ScalableVectorType>(Args[1]->getType()))
1996 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1997 return thisT()->getShuffleCost(
1999 cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
2000 cast<VectorType>(Args[1]->getType()));
2001 }
2002 case Intrinsic::vector_splice: {
2003 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2004 return thisT()->getShuffleCost(TTI::SK_Splice, cast<VectorType>(RetTy),
2005 cast<VectorType>(Args[0]->getType()), {},
2006 CostKind, Index, cast<VectorType>(RetTy));
2007 }
2008 case Intrinsic::vector_reduce_add:
2009 case Intrinsic::vector_reduce_mul:
2010 case Intrinsic::vector_reduce_and:
2011 case Intrinsic::vector_reduce_or:
2012 case Intrinsic::vector_reduce_xor:
2013 case Intrinsic::vector_reduce_smax:
2014 case Intrinsic::vector_reduce_smin:
2015 case Intrinsic::vector_reduce_fmax:
2016 case Intrinsic::vector_reduce_fmin:
2017 case Intrinsic::vector_reduce_fmaximum:
2018 case Intrinsic::vector_reduce_fminimum:
2019 case Intrinsic::vector_reduce_umax:
2020 case Intrinsic::vector_reduce_umin: {
2021 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
2023 }
2024 case Intrinsic::vector_reduce_fadd:
2025 case Intrinsic::vector_reduce_fmul: {
2027 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2029 }
2030 case Intrinsic::fshl:
2031 case Intrinsic::fshr: {
2032 const Value *X = Args[0];
2033 const Value *Y = Args[1];
2034 const Value *Z = Args[2];
2037 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
2038
2039 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2040 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
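// (Illustrative tally: for an i32 funnel shift the estimate below is
// or + sub + shl + lshr; a non-constant Z adds an 'and' because BW = 32 is a
// power of two, and a rotate (X == Y) skips the icmp/select pair.)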
2042 Cost +=
2043 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2044 Cost +=
2045 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2046 Cost += thisT()->getArithmeticInstrCost(
2047 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2048 {OpInfoZ.Kind, TTI::OP_None});
2049 Cost += thisT()->getArithmeticInstrCost(
2050 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2051 {OpInfoZ.Kind, TTI::OP_None});
2052 // Non-constant shift amounts require a modulo. If the type size is a
2053 // power of 2 then this will be converted to an 'and', otherwise it will
2054 // use a urem.
2055 if (!OpInfoZ.isConstant())
2056 Cost += thisT()->getArithmeticInstrCost(
2057 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2058 : BinaryOperator::URem,
2059 RetTy, CostKind, OpInfoZ,
2060 {TTI::OK_UniformConstantValue, TTI::OP_None});
2061 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
2062 if (X != Y) {
2063 Type *CondTy = RetTy->getWithNewBitWidth(1);
2064 Cost +=
2065 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2067 Cost +=
2068 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2070 }
2071 return Cost;
2072 }
2073 case Intrinsic::experimental_cttz_elts: {
2074 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
2075
2076 // If we're not expanding the intrinsic then we assume this is cheap
2077 // to implement.
2078 if (!getTLI()->shouldExpandCttzElements(ArgType))
2079 return getTypeLegalizationCost(RetTy).first;
2080
2081 // TODO: The costs below reflect the expansion code in
2082 // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
2083 // favour of compile time.
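// (Rough shape of that expansion, for illustration only: with N lanes it
// forms (splat(N) - stepvector) & sext(Mask), reduces with umax to get
// N - FirstSetIndex, and subtracts the result from N. E.g. Mask = <0,0,1,1>,
// N = 4: <4,3,2,1> & mask -> <0,0,2,1>, umax = 2, result = 4 - 2 = 2.)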
2084
2085 // Find the smallest "sensible" element type to use for the expansion.
2086 bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
2087 ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
2088 if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
2089 VScaleRange = getVScaleRange(I->getCaller(), 64);
2090
2091 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2092 RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
2093 Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);
2094
2095 // Create the new vector type & get the vector length
2096 Type *NewVecTy = VectorType::get(
2097 NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
2098
2099 IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
2100 FMF);
2102 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2103
2104 Cost +=
2105 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2106 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2107 Args[0]->getType(),
2109 Cost +=
2110 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2111
2112 IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
2113 NewEltTy, NewVecTy, FMF, I, 1);
2114 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2115 Cost +=
2116 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2117
2118 return Cost;
2119 }
2120 case Intrinsic::get_active_lane_mask:
2121 case Intrinsic::experimental_vector_match:
2122 case Intrinsic::experimental_vector_histogram_add:
2123 case Intrinsic::experimental_vector_histogram_uadd_sat:
2124 case Intrinsic::experimental_vector_histogram_umax:
2125 case Intrinsic::experimental_vector_histogram_umin:
2126 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2127 case Intrinsic::modf:
2128 case Intrinsic::sincos:
2129 case Intrinsic::sincospi: {
2130 std::optional<unsigned> CallRetElementIndex;
2131 // The first element of the modf result is returned by value in the
2132 // libcall.
2133 if (ICA.getID() == Intrinsic::modf)
2134 CallRetElementIndex = 0;
2135
2136 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2137 ICA, CostKind, CallRetElementIndex))
2138 return *Cost;
2139 // Otherwise, fallback to default scalarization cost.
2140 break;
2141 }
2142 case Intrinsic::loop_dependence_war_mask:
2143 case Intrinsic::loop_dependence_raw_mask: {
2144 // Compute the cost of the expanded version of these intrinsics:
2145 //
2146 // The possible expansions are...
2147 //
2148 // loop_dependence_war_mask:
2149 // diff = (ptrB - ptrA) / eltSize
2150 // cmp = icmp sle diff, 0
2151 // upper_bound = select cmp, -1, diff
2152 // mask = get_active_lane_mask 0, upper_bound
2153 //
2154 // loop_dependence_raw_mask:
2155 // diff = (abs(ptrB - ptrA)) / eltSize
2156 // cmp = icmp eq diff, 0
2157 // upper_bound = select cmp, -1, diff
2158 // mask = get_active_lane_mask 0, upper_bound
2159 //
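// (Worked example, illustrative only: for the WAR mask with i32 elements and
// ptrB - ptrA == 16 bytes, diff = 16 / 4 = 4, the compare is false, so
// upper_bound = 4 and the mask enables the first four lanes.)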
2160 auto *PtrTy = cast<PointerType>(ICA.getArgTypes()[0]);
2161 Type *IntPtrTy = IntegerType::getIntNTy(
2162 RetTy->getContext(), thisT()->getDataLayout().getPointerSizeInBits(
2163 PtrTy->getAddressSpace()));
2164 bool IsReadAfterWrite = IID == Intrinsic::loop_dependence_raw_mask;
2165
2167 thisT()->getArithmeticInstrCost(Instruction::Sub, IntPtrTy, CostKind);
2168 if (IsReadAfterWrite) {
2169 IntrinsicCostAttributes AbsAttrs(Intrinsic::abs, IntPtrTy, {IntPtrTy},
2170 {});
2171 Cost += thisT()->getIntrinsicInstrCost(AbsAttrs, CostKind);
2172 }
2173
2174 TTI::OperandValueInfo EltSizeOpInfo =
2175 TTI::getOperandInfo(ICA.getArgs()[2]);
2176 Cost += thisT()->getArithmeticInstrCost(Instruction::SDiv, IntPtrTy,
2177 CostKind, {}, EltSizeOpInfo);
2178
2179 Type *CondTy = IntegerType::getInt1Ty(RetTy->getContext());
2180 CmpInst::Predicate Pred =
2181 IsReadAfterWrite ? CmpInst::ICMP_EQ : CmpInst::ICMP_SLE;
2182 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CondTy,
2183 IntPtrTy, Pred, CostKind);
2184 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, IntPtrTy,
2185 CondTy, Pred, CostKind);
2186
2187 IntrinsicCostAttributes Attrs(Intrinsic::get_active_lane_mask, RetTy,
2188 {IntPtrTy, IntPtrTy}, FMF);
2189 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2190 return Cost;
2191 }
2192 }
2193
2194 // Assume that we need to scalarize this intrinsic.
2195 // Compute the scalarization overhead based on Args for a vector
2196 // intrinsic.
2197 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2198 if (RetVF.isVector() && !RetVF.isScalable()) {
2199 ScalarizationCost = 0;
2200 if (!RetTy->isVoidTy()) {
2201 for (Type *VectorTy : getContainedTypes(RetTy)) {
2202 ScalarizationCost += getScalarizationOverhead(
2203 cast<VectorType>(VectorTy),
2204 /*Insert=*/true, /*Extract=*/false, CostKind);
2205 }
2206 }
2207 ScalarizationCost += getOperandsScalarizationOverhead(
2208 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2209 CostKind);
2210 }
2211
2212 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
2213 ScalarizationCost);
2214 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2215 }
2216
2217 /// Get intrinsic cost based on argument types.
2218 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
2219 /// cost of scalarizing the arguments and the return value will be computed
2220 /// based on types.
2224 Intrinsic::ID IID = ICA.getID();
2225 Type *RetTy = ICA.getReturnType();
2226 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
2227 FastMathFlags FMF = ICA.getFlags();
2228 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
2229 bool SkipScalarizationCost = ICA.skipScalarizationCost();
2230
2231 VectorType *VecOpTy = nullptr;
2232 if (!Tys.empty()) {
2233 // The vector reduction operand is operand 0 except for fadd/fmul.
2234 // Their operand 0 is a scalar start value, so the vector op is operand 1.
2235 unsigned VecTyIndex = 0;
2236 if (IID == Intrinsic::vector_reduce_fadd ||
2237 IID == Intrinsic::vector_reduce_fmul)
2238 VecTyIndex = 1;
2239 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2240 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
2241 }
2242
2243 // Library call cost - other than size, make it expensive.
2244 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
2245 unsigned ISD = 0;
2246 switch (IID) {
2247 default: {
2248 // Scalable vectors cannot be scalarized, so return Invalid.
2249 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2250 return isa<ScalableVectorType>(Ty);
2251 }))
2253
2254 // Assume that we need to scalarize this intrinsic.
2255 InstructionCost ScalarizationCost =
2256 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2257 unsigned ScalarCalls = 1;
2258 Type *ScalarRetTy = RetTy;
2259 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2260 if (!SkipScalarizationCost)
2261 ScalarizationCost = getScalarizationOverhead(
2262 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
2263 ScalarCalls = std::max(ScalarCalls,
2265 ScalarRetTy = RetTy->getScalarType();
2266 }
2267 SmallVector<Type *, 4> ScalarTys;
2268 for (Type *Ty : Tys) {
2269 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2270 if (!SkipScalarizationCost)
2271 ScalarizationCost += getScalarizationOverhead(
2272 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2273 ScalarCalls = std::max(ScalarCalls,
2275 Ty = Ty->getScalarType();
2276 }
2277 ScalarTys.push_back(Ty);
2278 }
2279 if (ScalarCalls == 1)
2280 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
2281
2282 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
2283 InstructionCost ScalarCost =
2284 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2285
2286 return ScalarCalls * ScalarCost + ScalarizationCost;
2287 }
2288 // Look for intrinsics that can be lowered directly or turned into a scalar
2289 // intrinsic call.
2290 case Intrinsic::sqrt:
2291 ISD = ISD::FSQRT;
2292 break;
2293 case Intrinsic::sin:
2294 ISD = ISD::FSIN;
2295 break;
2296 case Intrinsic::cos:
2297 ISD = ISD::FCOS;
2298 break;
2299 case Intrinsic::sincos:
2300 ISD = ISD::FSINCOS;
2301 break;
2302 case Intrinsic::sincospi:
2304 break;
2305 case Intrinsic::modf:
2306 ISD = ISD::FMODF;
2307 break;
2308 case Intrinsic::tan:
2309 ISD = ISD::FTAN;
2310 break;
2311 case Intrinsic::asin:
2312 ISD = ISD::FASIN;
2313 break;
2314 case Intrinsic::acos:
2315 ISD = ISD::FACOS;
2316 break;
2317 case Intrinsic::atan:
2318 ISD = ISD::FATAN;
2319 break;
2320 case Intrinsic::atan2:
2321 ISD = ISD::FATAN2;
2322 break;
2323 case Intrinsic::sinh:
2324 ISD = ISD::FSINH;
2325 break;
2326 case Intrinsic::cosh:
2327 ISD = ISD::FCOSH;
2328 break;
2329 case Intrinsic::tanh:
2330 ISD = ISD::FTANH;
2331 break;
2332 case Intrinsic::exp:
2333 ISD = ISD::FEXP;
2334 break;
2335 case Intrinsic::exp2:
2336 ISD = ISD::FEXP2;
2337 break;
2338 case Intrinsic::exp10:
2339 ISD = ISD::FEXP10;
2340 break;
2341 case Intrinsic::log:
2342 ISD = ISD::FLOG;
2343 break;
2344 case Intrinsic::log10:
2345 ISD = ISD::FLOG10;
2346 break;
2347 case Intrinsic::log2:
2348 ISD = ISD::FLOG2;
2349 break;
2350 case Intrinsic::ldexp:
2351 ISD = ISD::FLDEXP;
2352 break;
2353 case Intrinsic::fabs:
2354 ISD = ISD::FABS;
2355 break;
2356 case Intrinsic::canonicalize:
2358 break;
2359 case Intrinsic::minnum:
2360 ISD = ISD::FMINNUM;
2361 break;
2362 case Intrinsic::maxnum:
2363 ISD = ISD::FMAXNUM;
2364 break;
2365 case Intrinsic::minimum:
2367 break;
2368 case Intrinsic::maximum:
2370 break;
2371 case Intrinsic::minimumnum:
2373 break;
2374 case Intrinsic::maximumnum:
2376 break;
2377 case Intrinsic::copysign:
2379 break;
2380 case Intrinsic::floor:
2381 ISD = ISD::FFLOOR;
2382 break;
2383 case Intrinsic::ceil:
2384 ISD = ISD::FCEIL;
2385 break;
2386 case Intrinsic::trunc:
2387 ISD = ISD::FTRUNC;
2388 break;
2389 case Intrinsic::nearbyint:
2391 break;
2392 case Intrinsic::rint:
2393 ISD = ISD::FRINT;
2394 break;
2395 case Intrinsic::lrint:
2396 ISD = ISD::LRINT;
2397 break;
2398 case Intrinsic::llrint:
2399 ISD = ISD::LLRINT;
2400 break;
2401 case Intrinsic::round:
2402 ISD = ISD::FROUND;
2403 break;
2404 case Intrinsic::roundeven:
2406 break;
2407 case Intrinsic::lround:
2408 ISD = ISD::LROUND;
2409 break;
2410 case Intrinsic::llround:
2411 ISD = ISD::LLROUND;
2412 break;
2413 case Intrinsic::pow:
2414 ISD = ISD::FPOW;
2415 break;
2416 case Intrinsic::fma:
2417 ISD = ISD::FMA;
2418 break;
2419 case Intrinsic::fmuladd:
2420 ISD = ISD::FMA;
2421 break;
2422 case Intrinsic::experimental_constrained_fmuladd:
2424 break;
2425 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2426 case Intrinsic::lifetime_start:
2427 case Intrinsic::lifetime_end:
2428 case Intrinsic::sideeffect:
2429 case Intrinsic::pseudoprobe:
2430 case Intrinsic::arithmetic_fence:
2431 return 0;
2432 case Intrinsic::masked_store: {
2433 Type *Ty = Tys[0];
2434 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2435 return thisT()->getMemIntrinsicInstrCost(
2436 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2437 }
2438 case Intrinsic::masked_load: {
2439 Type *Ty = RetTy;
2440 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2441 return thisT()->getMemIntrinsicInstrCost(
2442 MemIntrinsicCostAttributes(IID, Ty, TyAlign, 0), CostKind);
2443 }
2444 case Intrinsic::experimental_vp_strided_store: {
2445 auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
2446 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2447 return thisT()->getMemIntrinsicInstrCost(
2448 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2449 /*VariableMask=*/true, Alignment,
2450 ICA.getInst()),
2451 CostKind);
2452 }
2453 case Intrinsic::experimental_vp_strided_load: {
2454 auto *Ty = cast<VectorType>(ICA.getReturnType());
2455 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2456 return thisT()->getMemIntrinsicInstrCost(
2457 MemIntrinsicCostAttributes(IID, Ty, /*Ptr=*/nullptr,
2458 /*VariableMask=*/true, Alignment,
2459 ICA.getInst()),
2460 CostKind);
2461 }
2462 case Intrinsic::vector_reduce_add:
2463 case Intrinsic::vector_reduce_mul:
2464 case Intrinsic::vector_reduce_and:
2465 case Intrinsic::vector_reduce_or:
2466 case Intrinsic::vector_reduce_xor:
2467 return thisT()->getArithmeticReductionCost(
2468 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2469 CostKind);
2470 case Intrinsic::vector_reduce_fadd:
2471 case Intrinsic::vector_reduce_fmul:
2472 return thisT()->getArithmeticReductionCost(
2473 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2474 case Intrinsic::vector_reduce_smax:
2475 case Intrinsic::vector_reduce_smin:
2476 case Intrinsic::vector_reduce_umax:
2477 case Intrinsic::vector_reduce_umin:
2478 case Intrinsic::vector_reduce_fmax:
2479 case Intrinsic::vector_reduce_fmin:
2480 case Intrinsic::vector_reduce_fmaximum:
2481 case Intrinsic::vector_reduce_fminimum:
2482 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2483 VecOpTy, ICA.getFlags(), CostKind);
2484 case Intrinsic::experimental_vector_match: {
2485 auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
2486 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
2487 unsigned SearchSize = NeedleTy->getNumElements();
2488
2489 // If we're not expanding the intrinsic then we assume this is cheap to
2490 // implement.
2491 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2492 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2493 return getTypeLegalizationCost(RetTy).first;
2494
2495 // Approximate the cost based on the expansion code in
2496 // SelectionDAGBuilder.
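// (Rough shape of that expansion, for illustration: for each of the
// SearchSize needle elements, extract it, splat it across the search vector,
// compare for equality with the search vector, and OR into the running
// result; a final AND applies the mask operand.)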
2498 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2499 CostKind, 1, nullptr, nullptr);
2500 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2501 CostKind, 0, nullptr, nullptr);
2502 Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, SearchTy, {},
2503 CostKind, 0, nullptr);
2504 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2506 Cost +=
2507 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2508 Cost *= SearchSize;
2509 Cost +=
2510 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2511 return Cost;
2512 }
2513 case Intrinsic::vector_reverse:
2514 return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
2515 cast<VectorType>(ICA.getArgTypes()[0]), {},
2516 CostKind, 0, cast<VectorType>(RetTy));
2517 case Intrinsic::experimental_vector_histogram_add:
2518 case Intrinsic::experimental_vector_histogram_uadd_sat:
2519 case Intrinsic::experimental_vector_histogram_umax:
2520 case Intrinsic::experimental_vector_histogram_umin: {
2522 Type *EltTy = ICA.getArgTypes()[1];
2523
2524 // Targets with scalable vectors must handle this on their own.
2525 if (!PtrsTy)
2527
2528 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2530 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2531 CostKind, 1, nullptr, nullptr);
2532 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2533 CostKind);
2534 switch (IID) {
2535 default:
2536 llvm_unreachable("Unhandled histogram update operation.");
2537 case Intrinsic::experimental_vector_histogram_add:
2538 Cost +=
2539 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2540 break;
2541 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2542 IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
2543 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2544 break;
2545 }
2546 case Intrinsic::experimental_vector_histogram_umax: {
2547 IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
2548 Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
2549 break;
2550 }
2551 case Intrinsic::experimental_vector_histogram_umin: {
2552 IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
2553 Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
2554 break;
2555 }
2556 }
2557 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2558 CostKind);
2559 Cost *= PtrsTy->getNumElements();
2560 return Cost;
2561 }
2562 case Intrinsic::get_active_lane_mask: {
2563 Type *ArgTy = ICA.getArgTypes()[0];
2564 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2565 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2566
2567 // If we're not expanding the intrinsic then we assume this is cheap
2568 // to implement.
2569 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2570 return getTypeLegalizationCost(RetTy).first;
2571
2572 // Create the expanded types that will be used to calculate the uadd_sat
2573 // operation.
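// (Illustrative shape, assuming the generic lowering: the mask is roughly
// icmp ult (uadd.sat(splat(%base), stepvector)), splat(%n), which is what the
// uadd_sat and icmp costs below approximate.)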
2574 Type *ExpRetTy =
2575 VectorType::get(ArgTy, cast<VectorType>(RetTy)->getElementCount());
2576 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
2578 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2579 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2581 return Cost;
2582 }
2583 case Intrinsic::experimental_memset_pattern:
2584 // This cost is set to match the cost of the memset_pattern16 libcall.
2585 // It should likely be re-evaluated after migration to this intrinsic
2586 // is complete.
2587 return TTI::TCC_Basic * 4;
2588 case Intrinsic::abs:
2589 ISD = ISD::ABS;
2590 break;
2591 case Intrinsic::fshl:
2592 ISD = ISD::FSHL;
2593 break;
2594 case Intrinsic::fshr:
2595 ISD = ISD::FSHR;
2596 break;
2597 case Intrinsic::smax:
2598 ISD = ISD::SMAX;
2599 break;
2600 case Intrinsic::smin:
2601 ISD = ISD::SMIN;
2602 break;
2603 case Intrinsic::umax:
2604 ISD = ISD::UMAX;
2605 break;
2606 case Intrinsic::umin:
2607 ISD = ISD::UMIN;
2608 break;
2609 case Intrinsic::sadd_sat:
2610 ISD = ISD::SADDSAT;
2611 break;
2612 case Intrinsic::ssub_sat:
2613 ISD = ISD::SSUBSAT;
2614 break;
2615 case Intrinsic::uadd_sat:
2616 ISD = ISD::UADDSAT;
2617 break;
2618 case Intrinsic::usub_sat:
2619 ISD = ISD::USUBSAT;
2620 break;
2621 case Intrinsic::smul_fix:
2622 ISD = ISD::SMULFIX;
2623 break;
2624 case Intrinsic::umul_fix:
2625 ISD = ISD::UMULFIX;
2626 break;
2627 case Intrinsic::sadd_with_overflow:
2628 ISD = ISD::SADDO;
2629 break;
2630 case Intrinsic::ssub_with_overflow:
2631 ISD = ISD::SSUBO;
2632 break;
2633 case Intrinsic::uadd_with_overflow:
2634 ISD = ISD::UADDO;
2635 break;
2636 case Intrinsic::usub_with_overflow:
2637 ISD = ISD::USUBO;
2638 break;
2639 case Intrinsic::smul_with_overflow:
2640 ISD = ISD::SMULO;
2641 break;
2642 case Intrinsic::umul_with_overflow:
2643 ISD = ISD::UMULO;
2644 break;
2645 case Intrinsic::fptosi_sat:
2646 case Intrinsic::fptoui_sat: {
2647 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
2648 std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
2649
2650 // For cast instructions, types are different between source and
2651 // destination. We also need to check if the source type can be legalized.
2652 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2654 ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
2656 break;
2657 }
2658 case Intrinsic::ctpop:
2659 ISD = ISD::CTPOP;
2660 // In case of legalization use TCC_Expensive. This is cheaper than a
2661 // library call but still not a cheap instruction.
2662 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2663 break;
2664 case Intrinsic::ctlz:
2665 ISD = ISD::CTLZ;
2666 break;
2667 case Intrinsic::cttz:
2668 ISD = ISD::CTTZ;
2669 break;
2670 case Intrinsic::bswap:
2671 ISD = ISD::BSWAP;
2672 break;
2673 case Intrinsic::bitreverse:
2675 break;
2676 case Intrinsic::ucmp:
2677 ISD = ISD::UCMP;
2678 break;
2679 case Intrinsic::scmp:
2680 ISD = ISD::SCMP;
2681 break;
2682 }
2683
2684 auto *ST = dyn_cast<StructType>(RetTy);
2685 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2686 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);
2687
2688 const TargetLoweringBase *TLI = getTLI();
2689
2690 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2691 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2692 TLI->isFAbsFree(LT.second)) {
2693 return 0;
2694 }
2695
2696 // The operation is legal. Assume it costs 1.
2697 // If the type is split to multiple registers, assume that there is some
2698 // overhead to this.
2699 // TODO: Once we have extract/insert subvector cost we need to use them.
2700 if (LT.first > 1)
2701 return (LT.first * 2);
2702 else
2703 return (LT.first * 1);
2704 } else if (TLI->isOperationCustom(ISD, LT.second)) {
2705 // If the operation is custom lowered then assume
2706 // that the code is twice as expensive.
2707 return (LT.first * 2);
2708 }
2709
2710 switch (IID) {
2711 case Intrinsic::fmuladd: {
2712 // If we can't lower fmuladd into an FMA, estimate the cost as a
2713 // floating-point mul followed by an add.
2714
2715 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2716 CostKind) +
2717 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2718 CostKind);
2719 }
2720 case Intrinsic::experimental_constrained_fmuladd: {
2721 IntrinsicCostAttributes FMulAttrs(
2722 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2723 IntrinsicCostAttributes FAddAttrs(
2724 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2725 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2726 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2727 }
2728 case Intrinsic::smin:
2729 case Intrinsic::smax:
2730 case Intrinsic::umin:
2731 case Intrinsic::umax: {
2732 // minmax(X,Y) = select(icmp(X,Y),X,Y)
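// E.g. (illustrative) smax on i32:
//   %c = icmp sgt i32 %x, %y
//   %r = select i1 %c, i32 %x, i32 %y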
2733 Type *CondTy = RetTy->getWithNewBitWidth(1);
2734 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2735 CmpInst::Predicate Pred =
2736 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2738 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2739 Pred, CostKind);
2740 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2741 Pred, CostKind);
2742 return Cost;
2743 }
2744 case Intrinsic::sadd_with_overflow:
2745 case Intrinsic::ssub_with_overflow: {
2746 Type *SumTy = RetTy->getContainedType(0);
2747 Type *OverflowTy = RetTy->getContainedType(1);
2748 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2749 ? BinaryOperator::Add
2750 : BinaryOperator::Sub;
2751
2752 // Add:
2753 // Overflow -> (Result < LHS) ^ (RHS < 0)
2754 // Sub:
2755 // Overflow -> (Result < LHS) ^ (RHS > 0)
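// (Worked i8 example, illustrative only: 100 + 100 wraps to -56, so
// (Result < LHS) is true and (RHS < 0) is false; true ^ false signals
// overflow.)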
2757 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2758 Cost +=
2759 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2761 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2762 CostKind);
2763 return Cost;
2764 }
2765 case Intrinsic::uadd_with_overflow:
2766 case Intrinsic::usub_with_overflow: {
2767 Type *SumTy = RetTy->getContainedType(0);
2768 Type *OverflowTy = RetTy->getContainedType(1);
2769 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2770 ? BinaryOperator::Add
2771 : BinaryOperator::Sub;
2772 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2775
2777 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2778 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2779 OverflowTy, Pred, CostKind);
2780 return Cost;
2781 }
2782 case Intrinsic::smul_with_overflow:
2783 case Intrinsic::umul_with_overflow: {
2784 Type *MulTy = RetTy->getContainedType(0);
2785 Type *OverflowTy = RetTy->getContainedType(1);
2786 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2787 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2788 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2789
2790 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2792
2794 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2795 Cost +=
2796 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2797 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2798 CCH, CostKind);
2799 Cost += thisT()->getArithmeticInstrCost(
2800 Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2802
2803 if (IsSigned)
2804 Cost += thisT()->getArithmeticInstrCost(
2805 Instruction::AShr, MulTy, CostKind,
2808
2809 Cost += thisT()->getCmpSelInstrCost(
2810 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2811 return Cost;
2812 }
2813 case Intrinsic::sadd_sat:
2814 case Intrinsic::ssub_sat: {
2815 // Assume a default expansion.
2816 Type *CondTy = RetTy->getWithNewBitWidth(1);
2817
2818 Type *OpTy = StructType::create({RetTy, CondTy});
2819 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2820 ? Intrinsic::sadd_with_overflow
2821 : Intrinsic::ssub_with_overflow;
2823
2824 // SatMax -> Overflow && SumDiff < 0
2825 // SatMin -> Overflow && SumDiff >= 0
2827 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2828 nullptr, ScalarizationCostPassed);
2829 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2830 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2831 Pred, CostKind);
2832 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2833 CondTy, Pred, CostKind);
2834 return Cost;
2835 }
2836 case Intrinsic::uadd_sat:
2837 case Intrinsic::usub_sat: {
2838 Type *CondTy = RetTy->getWithNewBitWidth(1);
2839
2840 Type *OpTy = StructType::create({RetTy, CondTy});
2841 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2842 ? Intrinsic::uadd_with_overflow
2843 : Intrinsic::usub_with_overflow;
2844
2846 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2847 nullptr, ScalarizationCostPassed);
2848 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2849 Cost +=
2850 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2852 return Cost;
2853 }
2854 case Intrinsic::smul_fix:
2855 case Intrinsic::umul_fix: {
2856 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2857 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2858
2859 unsigned ExtOp =
2860 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2862
2864 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2865 Cost +=
2866 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2867 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2868 CCH, CostKind);
2869 Cost += thisT()->getArithmeticInstrCost(
2870 Instruction::LShr, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2872 Cost += thisT()->getArithmeticInstrCost(
2873 Instruction::Shl, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2875 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2876 return Cost;
2877 }
2878 case Intrinsic::abs: {
2879 // abs(X) = select(icmp(X,0),X,sub(0,X))
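// E.g. (illustrative) for <4 x i32>:
//   %c = icmp sgt <4 x i32> %x, zeroinitializer
//   %n = sub <4 x i32> zeroinitializer, %x
//   %r = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %n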
2880 Type *CondTy = RetTy->getWithNewBitWidth(1);
2883 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2884 Pred, CostKind);
2885 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2886 Pred, CostKind);
2887 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2888 Cost += thisT()->getArithmeticInstrCost(
2889 BinaryOperator::Sub, RetTy, CostKind,
2891 return Cost;
2892 }
2893 case Intrinsic::fshl:
2894 case Intrinsic::fshr: {
2895 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2896 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2897 Type *CondTy = RetTy->getWithNewBitWidth(1);
2899 Cost +=
2900 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2901 Cost +=
2902 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2903 Cost +=
2904 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2905 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2906 CostKind);
2907 // Non-constant shift amounts require a modulo. If the type size is a
2908 // power of 2 then this will be converted to an 'and', otherwise it will
2909 // use a urem.
2910 Cost += thisT()->getArithmeticInstrCost(
2911 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2912 : BinaryOperator::URem,
2913 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2914 {TTI::OK_UniformConstantValue, TTI::OP_None});
2915 // Shift-by-zero handling.
2916 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2918 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2920 return Cost;
2921 }
2922 case Intrinsic::fptosi_sat:
2923 case Intrinsic::fptoui_sat: {
2924 if (Tys.empty())
2925 break;
2926 Type *FromTy = Tys[0];
2927 bool IsSigned = IID == Intrinsic::fptosi_sat;
2928
2930 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2931 {FromTy, FromTy});
2932 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2933 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2934 {FromTy, FromTy});
2935 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2936 Cost += thisT()->getCastInstrCost(
2937 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2939 if (IsSigned) {
2940 Type *CondTy = RetTy->getWithNewBitWidth(1);
2941 Cost += thisT()->getCmpSelInstrCost(
2942 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2943 Cost += thisT()->getCmpSelInstrCost(
2944 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2945 }
2946 return Cost;
2947 }
2948 case Intrinsic::ucmp:
2949 case Intrinsic::scmp: {
2950 Type *CmpTy = Tys[0];
2951 Type *CondTy = RetTy->getWithNewBitWidth(1);
2953 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2955 CostKind) +
2956 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2958 CostKind);
2959
2960 EVT VT = TLI->getValueType(DL, CmpTy, true);
2962 // x < y ? -1 : (x > y ? 1 : 0)
2963 Cost += 2 * thisT()->getCmpSelInstrCost(
2964 BinaryOperator::Select, RetTy, CondTy,
2966 } else {
2967 // zext(x > y) - zext(x < y)
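// (Worked example, illustrative only: scmp(5, 2) = zext(5 > 2) - zext(5 < 2)
// = 1 - 0 = 1, while scmp(2, 5) gives 0 - 1 = -1.)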
2968 Cost +=
2969 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2971 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2972 CostKind);
2973 }
2974 return Cost;
2975 }
2976 case Intrinsic::maximumnum:
2977 case Intrinsic::minimumnum: {
2978 // On platforms that support FMAXNUM_IEEE/FMINNUM_IEEE, we expand
2979 // maximumnum/minimumnum to
2980 // ARG0 = fcanonicalize ARG0, ARG0 // to quiet ARG0
2981 // ARG1 = fcanonicalize ARG1, ARG1 // to quiet ARG1
2982 // RESULT = MAXNUM_IEEE ARG0, ARG1 // or MINNUM_IEEE
2983 // FIXME: In LangRef, we claimed FMAXNUM has the same behaviour as
2984 // FMAXNUM_IEEE, while the backend hasn't migrated the code yet.
2985 // Eventually, we will remove FMAXNUM_IEEE and FMINNUM_IEEE.
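// (Illustrative: maximumnum(sNaN, 1.0) first quiets the sNaN via the two
// canonicalizes, then the IEEE max picks 1.0; when the IEEE node is legal the
// estimate below is LT.first plus twice the canonicalize cost.)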
2986 int IeeeISD =
2987 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
2988 if (TLI->isOperationLegal(IeeeISD, LT.second)) {
2989 IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
2990 RetTy, Tys[0]);
2991 InstructionCost FCanonicalizeCost =
2992 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
2993 return LT.first + FCanonicalizeCost * 2;
2994 }
2995 break;
2996 }
2997 default:
2998 break;
2999 }
3000
3001 // Else, assume that we need to scalarize this intrinsic. For math builtins
3002 // this will emit a costly libcall, adding call overhead and spills. Make it
3003 // very expensive.
3004 if (isVectorizedTy(RetTy)) {
3005 ArrayRef<Type *> RetVTys = getContainedTypes(RetTy);
3006
3007 // Scalable vectors cannot be scalarized, so return Invalid.
3008 if (any_of(concat<Type *const>(RetVTys, Tys),
3009 [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
3011
3012 InstructionCost ScalarizationCost = ScalarizationCostPassed;
3013 if (!SkipScalarizationCost) {
3014 ScalarizationCost = 0;
3015 for (Type *RetVTy : RetVTys) {
3016 ScalarizationCost += getScalarizationOverhead(
3017 cast<VectorType>(RetVTy), /*Insert=*/true,
3018 /*Extract=*/false, CostKind);
3019 }
3020 }
3021
3022 unsigned ScalarCalls = getVectorizedTypeVF(RetTy).getFixedValue();
3023 SmallVector<Type *, 4> ScalarTys;
3024 for (Type *Ty : Tys) {
3025 if (Ty->isVectorTy())
3026 Ty = Ty->getScalarType();
3027 ScalarTys.push_back(Ty);
3028 }
3029 IntrinsicCostAttributes Attrs(IID, toScalarizedTy(RetTy), ScalarTys, FMF);
3030 InstructionCost ScalarCost =
3031 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
3032 for (Type *Ty : Tys) {
3033 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
3034 if (!ICA.skipScalarizationCost())
3035 ScalarizationCost += getScalarizationOverhead(
3036 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
3037 ScalarCalls = std::max(ScalarCalls,
3039 }
3040 }
3041 return ScalarCalls * ScalarCost + ScalarizationCost;
3042 }
3043
3044 // This is going to be turned into a library call, make it expensive.
3045 return SingleCallCost;
3046 }
3047
3048 /// Get memory intrinsic cost based on arguments.
3049 InstructionCost
3050 getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
3051 TTI::TargetCostKind CostKind) const override {
3052 unsigned Id = MICA.getID();
3053 Type *DataTy = MICA.getDataType();
3054 bool VariableMask = MICA.getVariableMask();
3055 Align Alignment = MICA.getAlignment();
3056
3057 switch (Id) {
3058 case Intrinsic::experimental_vp_strided_load:
3059 case Intrinsic::experimental_vp_strided_store: {
3060 unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load
3061 ? Instruction::Load
3062 : Instruction::Store;
3063 // For a target without strided memory operations (or for an illegal
3064 // operation type on one which does), assume we lower to a gather/scatter
3065 // operation (which may in turn be scalarized).
3066 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3067 VariableMask, true, CostKind);
3068 }
3069 case Intrinsic::masked_scatter:
3070 case Intrinsic::masked_gather:
3071 case Intrinsic::vp_scatter:
3072 case Intrinsic::vp_gather: {
3073 unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||
3074 MICA.getID() == Intrinsic::vp_gather)
3075 ? Instruction::Load
3076 : Instruction::Store;
3077
3078 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3079 VariableMask, true, CostKind);
3080 }
3081 case Intrinsic::vp_load:
3082 case Intrinsic::vp_store:
3084 case Intrinsic::masked_load:
3085 case Intrinsic::masked_store: {
3086 unsigned Opcode =
3087 Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;
3088 // TODO: Pass on AddressSpace when we have test coverage.
3089 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
3090 CostKind);
3091 }
3092 case Intrinsic::masked_compressstore:
3093 case Intrinsic::masked_expandload: {
3094 unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload
3095 ? Instruction::Load
3096 : Instruction::Store;
3097 // Treat expand load/compress store as gather/scatter operation.
3098 // TODO: implement more precise cost estimation for these intrinsics.
3099 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,
3100 VariableMask,
3101 /*IsGatherScatter*/ true, CostKind);
3102 }
3103 case Intrinsic::vp_load_ff:
3105 default:
3106 llvm_unreachable("unexpected intrinsic");
3107 }
3108 }
3109
3110 /// Compute a cost of the given call instruction.
3111 ///
3112 /// Compute the cost of calling function F with return type RetTy and
3113 /// argument types Tys. F might be nullptr, in this case the cost of an
3114 /// arbitrary call with the specified signature will be returned.
3115 /// This is used, for instance, when we estimate the cost of a call to a
3116 /// vector counterpart of the given function.
3117 /// \param F Called function, might be nullptr.
3118 /// \param RetTy Return value types.
3119 /// \param Tys Argument types.
3120 /// \returns The cost of Call instruction.
3121 InstructionCost
3122 getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
3123 TTI::TargetCostKind CostKind) const override {
3124 return 10;
3125 }
3126
3127 unsigned getNumberOfParts(Type *Tp) const override {
3128 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
3129 if (!LT.first.isValid())
3130 return 0;
3131 // Try to find actual number of parts for non-power-of-2 elements as
3132 // ceil(num-of-elements/num-of-subtype-elements).
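// (Illustrative: a <6 x i32> whose legal per-register subtype is v4i32 is
// reported as ceil(6 / 4) = 2 parts.)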
3133 if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
3134 Tp && LT.second.isFixedLengthVector() &&
3135 !has_single_bit(FTp->getNumElements())) {
3136 if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
3137 EVT(LT.second).getTypeForEVT(Tp->getContext()));
3138 SubTp && SubTp->getElementType() == FTp->getElementType())
3139 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3140 }
3141 return LT.first.getValue();
3142 }
3143
3146 TTI::TargetCostKind) const override {
3147 return 0;
3148 }
3149
3150 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
3151 /// We're assuming that reduction operations are performed in the following way:
3152 ///
3153 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
3154 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
3155 /// \----------------v-------------/ \----------v------------/
3156 /// n/2 elements n/2 elements
3157 /// %red1 = op <n x t> %val, <n x t> val1
3158 /// After this operation we have a vector %red1 where only the first n/2
3159 /// elements are meaningful, the second n/2 elements are undefined and can be
3160 /// dropped. All other operations are actually working with the vector of
3161 /// length n/2, not n, though the real vector length is still n.
3162 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
3163 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
3164 /// \----------------v-------------/ \----------v------------/
3165 /// n/4 elements 3*n/4 elements
3166 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
3167 /// length n/2, the resulting vector has length n/4 etc.
3168 ///
3169 /// The cost model should take into account that the actual length of the
3170 /// vector is reduced on each iteration.
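/// (Illustrative tally: for an add reduction of <8 x i32> on a target whose
/// legal vector width already covers 8 lanes, this models log2(8) = 3
/// shuffle-plus-add levels followed by a single extractelement of lane 0.)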
3171 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
3172 TTI::TargetCostKind CostKind) const {
3173 // Targets must implement a default value for the scalable case, since
3174 // we don't know how many lanes the vector has.
3177
3178 Type *ScalarTy = Ty->getElementType();
3179 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3180 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3181 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
3182 NumVecElts >= 2) {
3183 // Or reduction for i1 is represented as:
3184 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3185 // %res = cmp ne iReduxWidth %val, 0
3186 // And reduction for i1 is represented as:
3187 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3188 // %res = cmp eq iReduxWidth %val, 11111
3189 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
3190 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3192 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3195 }
3196 unsigned NumReduxLevels = Log2_32(NumVecElts);
3197 InstructionCost ArithCost = 0;
3198 InstructionCost ShuffleCost = 0;
3199 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3200 unsigned LongVectorCount = 0;
3201 unsigned MVTLen =
3202 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3203 while (NumVecElts > MVTLen) {
3204 NumVecElts /= 2;
3205 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3206 ShuffleCost += thisT()->getShuffleCost(
3207 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3208 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3209 Ty = SubTy;
3210 ++LongVectorCount;
3211 }
3212
3213 NumReduxLevels -= LongVectorCount;
3214
3215 // The minimal length of the vector is limited by the real length of vector
3216 // operations performed on the current platform. That's why several final
3217 // reduction operations are performed on the vectors with the same
3218 // architecture-dependent length.
3219
3220 // By default reductions need one shuffle per reduction level.
3221 ShuffleCost +=
3222 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3223 Ty, {}, CostKind, 0, Ty);
3224 ArithCost +=
3225 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3226 return ShuffleCost + ArithCost +
3227 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3228 CostKind, 0, nullptr, nullptr);
3229 }
3230
3231 /// Try to calculate the cost of performing strict (in-order) reductions,
3232 /// which involves doing a sequence of floating point additions in lane
3233 /// order, starting with an initial value. For example, consider a scalar
3234 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
3235 ///
3236 /// Vector = <float %v0, float %v1, float %v2, float %v3>
3237 ///
3238 /// %add1 = %InitVal + %v0
3239 /// %add2 = %add1 + %v1
3240 /// %add3 = %add2 + %v2
3241 /// %add4 = %add3 + %v3
3242 ///
3243 /// As a simple estimate we can say the cost of such a reduction is 4 times
3244 /// the cost of a scalar FP addition. We can only estimate the costs for
3245 /// fixed-width vectors here because for scalable vectors we do not know the
3246 /// runtime number of operations.
3247 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
3248 TTI::TargetCostKind CostKind) const {
3249 // Targets must implement a default value for the scalable case, since
3250 // we don't know how many lanes the vector has.
3253
3254 auto *VTy = cast<FixedVectorType>(Ty);
3256 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
3257 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
3258 Opcode, VTy->getElementType(), CostKind);
3259 ArithCost *= VTy->getNumElements();
3260
3261 return ExtractCost + ArithCost;
3262 }
3263
3264 InstructionCost
3265 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
3266 std::optional<FastMathFlags> FMF,
3267 TTI::TargetCostKind CostKind) const override {
3268 assert(Ty && "Unknown reduction vector type");
3270 return getOrderedReductionCost(Opcode, Ty, CostKind);
3271 return getTreeReductionCost(Opcode, Ty, CostKind);
3272 }
3273
3274 /// Try to calculate op costs for min/max reduction operations.
3275 /// \param CondTy Conditional type for the Select instruction.
3276 InstructionCost
3277 getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
3278 TTI::TargetCostKind CostKind) const override {
3279 // Targets must implement a default value for the scalable case, since
3280 // we don't know how many lanes the vector has.
3283
3284 Type *ScalarTy = Ty->getElementType();
3285 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3286 unsigned NumReduxLevels = Log2_32(NumVecElts);
3287 InstructionCost MinMaxCost = 0;
3288 InstructionCost ShuffleCost = 0;
3289 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3290 unsigned LongVectorCount = 0;
3291 unsigned MVTLen =
3292 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3293 while (NumVecElts > MVTLen) {
3294 NumVecElts /= 2;
3295 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3296
3297 ShuffleCost += thisT()->getShuffleCost(
3298 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3299
3300 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
3301 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
3302 Ty = SubTy;
3303 ++LongVectorCount;
3304 }
3305
3306 NumReduxLevels -= LongVectorCount;
3307
3308 // The minimal length of the vector is limited by the real length of vector
3309 // operations performed on the current platform. That's why several final
3310 // reduction operations are performed on the vectors with the same
3311 // architecture-dependent length.
3312 ShuffleCost +=
3313 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3314 Ty, {}, CostKind, 0, Ty);
3315 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
3316 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
3317 // The last min/max should be in vector registers and we counted it above,
3318 // so we just need a single extractelement.
3319 return ShuffleCost + MinMaxCost +
3320 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3321 CostKind, 0, nullptr, nullptr);
3322 }
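To make the halving loop above concrete, here is an editorial flat-cost sketch of the same recurrence: the vector is split down to the legal width (one extract-subvector shuffle plus one min/max per halving), the remaining log2 levels are charged at the legal width, and a final extractelement reads out the result. All names and unit costs below are hypothetical, and lane counts are assumed to be powers of two, matching the Log2-based level count above.

#include <bit>
#include <cstdint>

uint64_t minMaxReductionCostSketch(unsigned NumVecElts, unsigned LegalNumElts,
                                   uint64_t ShuffleCost, uint64_t MinMaxCost,
                                   uint64_t ExtractCost) {
  unsigned Levels = std::bit_width(NumVecElts) - 1; // floor(log2(NumVecElts))
  uint64_t Cost = 0;
  while (NumVecElts > LegalNumElts) { // halve until the vector type is legal
    NumVecElts /= 2;
    Cost += ShuffleCost + MinMaxCost;
    --Levels;
  }
  // Remaining tree levels happen within a legal-width register; one
  // extractelement then reads lane 0.
  return Cost + Levels * (ShuffleCost + MinMaxCost) + ExtractCost;
}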
3323
3324 InstructionCost
3325 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
3326 VectorType *Ty, std::optional<FastMathFlags> FMF,
3327 TTI::TargetCostKind CostKind) const override {
3328 if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
3329 FTy && IsUnsigned && Opcode == Instruction::Add &&
3330 FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
3331 // Represent vector_reduce_add(ZExt(<n x i1>)) as
3332 // ZExtOrTrunc(ctpop(bitcast <n x i1> to iN)).
3333 auto *IntTy =
3334 IntegerType::get(ResTy->getContext(), FTy->getNumElements());
3335 IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy},
3336 FMF ? *FMF : FastMathFlags());
3337 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3338 TTI::CastContextHint::None, CostKind) +
3339 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3340 }
3341 // Without any native support, this is equivalent to the cost of
3342 // vecreduce.opcode(ext(Ty A)).
3343 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3344 InstructionCost RedCost =
3345 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3346 InstructionCost ExtCost = thisT()->getCastInstrCost(
3347 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3348 TTI::CastContextHint::None, CostKind);
3349
3350 return RedCost + ExtCost;
3351 }
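The i1 special case above rests on a standard equivalence: summing the zero-extended bits of a mask equals population-counting the mask bitcast to an integer. As an editorial illustration (the IR and unit costs below are hypothetical, not taken from this file):

// For an <8 x i1> mask,
//   %wide = zext <8 x i1> %mask to <8 x i32>
//   %sum  = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %wide)
// is costed as if it were
//   %bits = bitcast <8 x i1> %mask to i8
//   %cnt  = call i8 @llvm.ctpop.i8(i8 %bits)
//   %sum  = zext i8 %cnt to i32
// Flat-cost mock of the two paths taken by the function above:
unsigned extendedReductionCostSketch(bool BoolAddReduction, unsigned CastCost,
                                     unsigned CtpopCost, unsigned ExtCost,
                                     unsigned WideReductionCost) {
  return BoolAddReduction ? CastCost + CtpopCost : ExtCost + WideReductionCost;
}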
3352
3353 InstructionCost
3354 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
3355 VectorType *Ty,
3356 TTI::TargetCostKind CostKind) const override {
3357 // Without any native support, this is equivalent to the cost of
3358 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
3359 // vecreduce.add(mul(A, B)).
3360 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3361 "The reduction opcode is expected to be Add or Sub.");
3362 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3363 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
3364 RedOpcode, ExtTy, std::nullopt, CostKind);
3365 InstructionCost ExtCost = thisT()->getCastInstrCost(
3366 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3367 TTI::CastContextHint::None, CostKind);
3368
3369 InstructionCost MulCost =
3370 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3371
3372 return RedCost + MulCost + 2 * ExtCost;
3373 }
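Similarly, absent native dot-product support, the multiply-accumulate reduction above is priced as one reduction and one multiply at the widened type plus one extend per multiplicand. A trivial flat-cost sketch with hypothetical unit costs:

unsigned mulAccReductionCostSketch(unsigned WideReductionCost,
                                   unsigned WideMulCost, unsigned ExtCost) {
  // RedCost + MulCost + 2 * ExtCost, as computed above.
  return WideReductionCost + WideMulCost + 2 * ExtCost;
}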
3374
3375 InstructionCost getVectorSplitCost() const { return 1; }
3376
3377 /// @}
3378};
3379
3380/// Concrete BasicTTIImpl that can be used if no further customization
3381/// is needed.
3382class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
3383 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
3384
3385 friend class BasicTTIImplBase<BasicTTIImpl>;
3386
3387 const TargetSubtargetInfo *ST;
3388 const TargetLoweringBase *TLI;
3389
3390 const TargetSubtargetInfo *getST() const { return ST; }
3391 const TargetLoweringBase *getTLI() const { return TLI; }
3392
3393public:
3394 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
3395};
3396
3397} // end namespace llvm
3398
3399#endif // LLVM_CODEGEN_BASICTTIIMPL_H