BasicTTIImpl.h
1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/BitVector.h"
21#include "llvm/ADT/STLExtras.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/Operator.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
54#include <algorithm>
55#include <cassert>
56#include <cstdint>
57#include <limits>
58#include <optional>
59#include <utility>
60
61namespace llvm {
62
63class Function;
64class GlobalValue;
65class LLVMContext;
66class ScalarEvolution;
67class SCEV;
68class TargetMachine;
69
70extern cl::opt<unsigned> PartialUnrollingThreshold;
71
72/// Base class which can be used to help build a TTI implementation.
73///
74/// This class provides as much implementation of the TTI interface as is
75/// possible using the target independent parts of the code generator.
76///
77/// In order to subclass it, your class must implement a getST() method to
78/// return the subtarget, and a getTLI() method to return the target lowering.
79/// We need these methods implemented in the derived class so that this class
80/// doesn't have to duplicate storage for them.
81template <typename T>
82class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
83private:
84 using BaseT = TargetTransformInfoImplCRTPBase<T>;
85 using TTI = TargetTransformInfo;
86
87 /// Helper function to access this as a T.
88 const T *thisT() const { return static_cast<const T *>(this); }
89
90 /// Estimate a cost of Broadcast as an extract and sequence of insert
91 /// operations.
92 InstructionCost
93 getBroadcastShuffleOverhead(FixedVectorType *VTy,
94 TTI::TargetCostKind CostKind) const {
95 InstructionCost Cost = 0;
96 // Broadcast cost is equal to the cost of extracting the zero'th element
97 // plus the cost of inserting it into every element of the result vector.
98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
99 CostKind, 0, nullptr, nullptr);
100
101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
103 CostKind, i, nullptr, nullptr);
104 }
105 return Cost;
106 }
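// Illustrative example: for a <4 x float> broadcast this helper queries one
// extractelement cost (lane 0) and four insertelement costs, so with unit
// per-lane costs the estimate is 5.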
107
108 /// Estimate a cost of shuffle as a sequence of extract and insert
109 /// operations.
110 InstructionCost
111 getPermuteShuffleOverhead(FixedVectorType *VTy,
112 TTI::TargetCostKind CostKind) const {
113 InstructionCost Cost = 0;
114 // Shuffle cost is equal to the cost of extracting each element from its
115 // source argument plus the cost of inserting them into the result vector.
116
117 // e.g. a <4 x float> shuffle with mask <0,5,2,7> extracts index 0 of the
118 // first vector, index 1 of the second vector, index 2 of the first vector
119 // and index 3 of the second vector, and inserts them at indices <0,1,2,3>
120 // of the result vector.
121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
123 CostKind, i, nullptr, nullptr);
124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
125 CostKind, i, nullptr, nullptr);
126 }
127 return Cost;
128 }
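// Illustrative example: for a <4 x float> permute this helper queries one
// insertelement and one extractelement cost per result lane, i.e. 8 queries
// in total, giving 8 with unit per-lane costs.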
129
130 /// Estimate a cost of subvector extraction as a sequence of extract and
131 /// insert operations.
132 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
133 TTI::TargetCostKind CostKind,
134 int Index,
135 FixedVectorType *SubVTy) const {
136 assert(VTy && SubVTy &&
137 "Can only extract subvectors from vectors");
138 int NumSubElts = SubVTy->getNumElements();
140 (Index + NumSubElts) <=
142 "SK_ExtractSubvector index out of range");
143
144 InstructionCost Cost = 0;
145 // Subvector extraction cost is equal to the cost of extracting each
146 // element from the source vector plus the cost of inserting them into the
147 // result vector type.
148 for (int i = 0; i != NumSubElts; ++i) {
149 Cost +=
150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
151 CostKind, i + Index, nullptr, nullptr);
152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
153 CostKind, i, nullptr, nullptr);
154 }
155 return Cost;
156 }
157
158 /// Estimate a cost of subvector insertion as a sequence of extract and
159 /// insert operations.
160 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
161 TTI::TargetCostKind CostKind,
162 int Index,
163 FixedVectorType *SubVTy) const {
164 assert(VTy && SubVTy &&
165 "Can only insert subvectors into vectors");
166 int NumSubElts = SubVTy->getNumElements();
168 (Index + NumSubElts) <=
170 "SK_InsertSubvector index out of range");
171
172 InstructionCost Cost = 0;
173 // Subvector insertion cost is equal to the cost of extracting each element
174 // from the subvector plus the cost of inserting them into the result
175 // vector type.
176 for (int i = 0; i != NumSubElts; ++i) {
177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
178 CostKind, i, nullptr, nullptr);
179 Cost +=
180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
181 i + Index, nullptr, nullptr);
182 }
183 return Cost;
184 }
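// Illustrative example: inserting a <2 x i32> subvector at Index 2 of an
// <8 x i32> vector is modeled as two extracts from the subvector plus two
// inserts into lanes 2 and 3 of the wide vector (4 queries in total).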
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetSubtargetInfo *getST() const {
188 return static_cast<const T *>(this)->getST();
189 }
190
191 /// Local query method delegates up to T which *must* implement this!
192 const TargetLoweringBase *getTLI() const {
193 return static_cast<const T *>(this)->getTLI();
194 }
195
196 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
197 switch (M) {
198 case TTI::MIM_Unindexed:
199 return ISD::UNINDEXED;
200 case TTI::MIM_PreInc:
201 return ISD::PRE_INC;
202 case TTI::MIM_PreDec:
203 return ISD::PRE_DEC;
204 case TTI::MIM_PostInc:
205 return ISD::POST_INC;
206 case TTI::MIM_PostDec:
207 return ISD::POST_DEC;
208 }
209 llvm_unreachable("Unexpected MemIndexedMode");
210 }
211
212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
213 Align Alignment,
214 bool VariableMask,
215 bool IsGatherScatter,
217 unsigned AddressSpace = 0) const {
218 // We cannot scalarize scalable vectors, so return Invalid.
219 if (isa<ScalableVectorType>(DataTy))
220 return InstructionCost::getInvalid();
221
222 auto *VT = cast<FixedVectorType>(DataTy);
223 unsigned VF = VT->getNumElements();
224
225 // Assume the target does not have support for gather/scatter operations
226 // and provide a rough estimate.
227 //
228 // First, compute the cost of the individual memory operations.
229 InstructionCost AddrExtractCost =
230 IsGatherScatter ? getScalarizationOverhead(
232 PointerType::get(VT->getContext(), 0), VF),
233 /*Insert=*/false, /*Extract=*/true, CostKind)
234 : 0;
235
236 // The cost of the scalar loads/stores.
237 InstructionCost MemoryOpCost =
238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,
240
241 // Next, compute the cost of packing the result in a vector.
242 InstructionCost PackingCost =
243 getScalarizationOverhead(VT, Opcode != Instruction::Store,
244 Opcode == Instruction::Store, CostKind);
245
246 InstructionCost ConditionalCost = 0;
247 if (VariableMask) {
248 // Compute the cost of conditionally executing the memory operations with
249 // variable masks. This includes extracting the individual conditions,
250 // the branches, and PHIs to combine the results.
251 // NOTE: Estimating the cost of conditionally executing the memory
252 // operations accurately is quite difficult and the current solution
253 // provides a very rough estimate only.
254 ConditionalCost =
257 /*Insert=*/false, /*Extract=*/true, CostKind) +
258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +
259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));
260 }
261
262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;
263 }
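// Illustrative breakdown for a variable-mask gather of <4 x i32>: the cost is
// the overhead of extracting the four pointers from the address vector, plus
// 4x the scalar load cost, plus the overhead of inserting the four loaded
// values into the result vector, plus the conditional-execution estimate
// (extracting the four i1 mask lanes and 4x (branch + PHI)).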
264
265 /// Checks if the provided mask \p Mask is a splat mask, i.e. it contains
266 /// only -1 or one repeated non -1 index value, and that index value appears
267 /// at least twice. So, mask <0, -1, -1, -1> is not considered a splat (it is
268 /// just an identity), same for <-1, 0, -1, -1> (just a slide), while
269 /// <2, -1, 2, -1> is a splat with \p Index=2.
270 static bool isSplatMask(ArrayRef<int> Mask, unsigned NumSrcElts, int &Index) {
271 // Check that the broadcast index appears at least twice.
272 bool IsCompared = false;
273 if (int SplatIdx = PoisonMaskElem;
274 all_of(enumerate(Mask), [&](const auto &P) {
275 if (P.value() == PoisonMaskElem)
276 return P.index() != Mask.size() - 1 || IsCompared;
277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)
278 return false;
279 if (SplatIdx == PoisonMaskElem) {
280 SplatIdx = P.value();
281 return P.index() != Mask.size() - 1;
282 }
283 IsCompared = true;
284 return SplatIdx == P.value();
285 })) {
286 Index = SplatIdx;
287 return true;
288 }
289 return false;
290 }
291
292 /// Several intrinsics that return structs (including llvm.sincos[pi] and
293 /// llvm.modf) can be lowered to a vector library call (for certain VFs). The
294 /// vector library functions correspond to the scalar calls (e.g. sincos or
295 /// modf), which, unlike the intrinsic, return values via output pointers. This
296 /// helper checks if a vector call exists for the given intrinsic, and returns
297 /// the cost, which includes the cost of the mask (if required), and the loads
298 /// for values returned via output pointers. \p LC is the scalar libcall and
299 /// \p CallRetElementIndex (optional) is the struct element which is mapped to
300 /// the call return value. If std::nullopt is returned, then no vector library
301 /// call is available, so the intrinsic should be assigned the default cost
302 /// (e.g. scalarization).
303 std::optional<InstructionCost> getMultipleResultIntrinsicVectorLibCallCost(
305 std::optional<unsigned> CallRetElementIndex = {}) const {
306 Type *RetTy = ICA.getReturnType();
307 // Vector variants of the intrinsic can be mapped to a vector library call.
308 auto const *LibInfo = ICA.getLibInfo();
309 if (!LibInfo || !isa<StructType>(RetTy) ||
311 return std::nullopt;
312
313 Type *Ty = getContainedTypes(RetTy).front();
314 EVT VT = getTLI()->getValueType(DL, Ty);
315
316 EVT ScalarVT = VT.getScalarType();
317 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
318
319 switch (ICA.getID()) {
320 case Intrinsic::modf:
321 LC = RTLIB::getMODF(ScalarVT);
322 break;
323 case Intrinsic::sincospi:
324 LC = RTLIB::getSINCOSPI(ScalarVT);
325 break;
326 case Intrinsic::sincos:
327 LC = RTLIB::getSINCOS(ScalarVT);
328 break;
329 default:
330 return std::nullopt;
331 }
332
333 // Find associated libcall.
334 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);
335 if (LibcallImpl == RTLIB::Unsupported)
336 return std::nullopt;
337
338 StringRef LCName =
340
341 // Search for a corresponding vector variant.
342 //
343 // FIXME: Should use RuntimeLibcallsInfo, not TargetLibraryInfo to get the
344 // vector mapping.
345 LLVMContext &Ctx = RetTy->getContext();
346 ElementCount VF = cast<VectorType>(Ty)->getElementCount();
347 VecDesc const *VD = nullptr;
348 for (bool Masked : {false, true}) {
349 if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
350 break;
351 }
352 if (!VD)
353 return std::nullopt;
354
355 // Cost the call + mask.
356 auto Cost =
357 thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
358 if (VD->isMasked()) {
359 auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
360 Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
361 VecTy, {}, CostKind, 0, nullptr, {});
362 }
363
364 // Lowering to a library call (with output pointers) may require us to emit
365 // reloads for the results.
366 for (auto [Idx, VectorTy] : enumerate(getContainedTypes(RetTy))) {
367 if (Idx == CallRetElementIndex)
368 continue;
369 Cost += thisT()->getMemoryOpCost(
370 Instruction::Load, VectorTy,
371 thisT()->getDataLayout().getABITypeAlign(VectorTy), 0, CostKind);
372 }
373 return Cost;
374 }
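// Illustrative example: for llvm.sincos on <4 x float>, if TargetLibraryInfo
// provides a vector sincos mapping for VF=4, the returned cost is the vector
// call cost, plus a broadcast of the mask when the variant is masked, plus
// one load per struct element that the library call returns via an output
// pointer rather than as its return value.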
375
376 /// Filter out constant and duplicated entries in \p Ops and return a vector
377 /// containing the types from \p Tys corresponding to the remaining operands.
379 filterConstantAndDuplicatedOperands(ArrayRef<const Value *> Ops,
380 ArrayRef<Type *> Tys) {
381 SmallPtrSet<const Value *, 4> UniqueOperands;
382 SmallVector<Type *, 4> FilteredTys;
383 for (const auto &[Op, Ty] : zip_equal(Ops, Tys)) {
384 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second)
385 continue;
386 FilteredTys.push_back(Ty);
387 }
388 return FilteredTys;
389 }
390
391protected:
392 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
393 : BaseT(DL) {}
394 ~BasicTTIImplBase() override = default;
395
397
398public:
399 /// \name Scalar TTI Implementations
400 /// @{
402 unsigned AddressSpace, Align Alignment,
403 unsigned *Fast) const override {
404 EVT E = EVT::getIntegerVT(Context, BitWidth);
405 return getTLI()->allowsMisalignedMemoryAccesses(
407 }
408
409 bool areInlineCompatible(const Function *Caller,
410 const Function *Callee) const override {
411 const TargetMachine &TM = getTLI()->getTargetMachine();
412
413 const FeatureBitset &CallerBits =
414 TM.getSubtargetImpl(*Caller)->getFeatureBits();
415 const FeatureBitset &CalleeBits =
416 TM.getSubtargetImpl(*Callee)->getFeatureBits();
417
418 // Inline a callee if its target-features are a subset of the caller's
419 // target-features.
420 return (CallerBits & CalleeBits) == CalleeBits;
421 }
422
423 bool hasBranchDivergence(const Function *F = nullptr) const override {
424 return false;
425 }
426
427 bool isSourceOfDivergence(const Value *V) const override { return false; }
428
429 bool isAlwaysUniform(const Value *V) const override { return false; }
430
431 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
432 return false;
433 }
434
435 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
436 return true;
437 }
438
439 unsigned getFlatAddressSpace() const override {
440 // Return an invalid address space.
441 return -1;
442 }
443
445 Intrinsic::ID IID) const override {
446 return false;
447 }
448
449 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
450 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
451 }
452
453 unsigned getAssumedAddrSpace(const Value *V) const override {
454 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
455 }
456
457 bool isSingleThreaded() const override {
458 return getTLI()->getTargetMachine().Options.ThreadModel ==
460 }
461
462 std::pair<const Value *, unsigned>
463 getPredicatedAddrSpace(const Value *V) const override {
464 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
465 }
466
468 Value *NewV) const override {
469 return nullptr;
470 }
471
472 bool isLegalAddImmediate(int64_t imm) const override {
473 return getTLI()->isLegalAddImmediate(imm);
474 }
475
476 bool isLegalAddScalableImmediate(int64_t Imm) const override {
477 return getTLI()->isLegalAddScalableImmediate(Imm);
478 }
479
480 bool isLegalICmpImmediate(int64_t imm) const override {
481 return getTLI()->isLegalICmpImmediate(imm);
482 }
483
484 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
485 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
486 Instruction *I = nullptr,
487 int64_t ScalableOffset = 0) const override {
488 TargetLoweringBase::AddrMode AM;
489 AM.BaseGV = BaseGV;
490 AM.BaseOffs = BaseOffset;
491 AM.HasBaseReg = HasBaseReg;
492 AM.Scale = Scale;
493 AM.ScalableOffset = ScalableOffset;
494 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
495 }
496
497 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
498 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
499 }
500
501 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
502 Type *ScalarValTy) const override {
503 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
504 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
505 EVT VT = getTLI()->getValueType(DL, SrcTy);
506 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
507 getTLI()->isOperationCustom(ISD::STORE, VT))
508 return true;
509
510 EVT ValVT =
511 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
512 EVT LegalizedVT =
513 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
514 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
515 };
516 while (VF > 2 && IsSupportedByTarget(VF))
517 VF /= 2;
518 return VF;
519 }
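// Illustrative example: assuming a target where stores of <8 x i16> and
// <4 x i16> are legal but <2 x i16> is not, starting from VF=16 the loop
// halves VF while the half-width store is still supported and returns 4,
// the smallest VF in that halving chain whose store the target can still
// handle directly.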
520
521 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override {
522 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
523 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
524 }
525
526 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override {
527 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
528 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
529 }
530
532 const TTI::LSRCost &C2) const override {
534 }
535
539
543
547
549 StackOffset BaseOffset, bool HasBaseReg,
550 int64_t Scale,
551 unsigned AddrSpace) const override {
552 TargetLoweringBase::AddrMode AM;
553 AM.BaseGV = BaseGV;
554 AM.BaseOffs = BaseOffset.getFixed();
555 AM.HasBaseReg = HasBaseReg;
556 AM.Scale = Scale;
557 AM.ScalableOffset = BaseOffset.getScalable();
558 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
559 return 0;
560 return InstructionCost::getInvalid();
561 }
562
563 bool isTruncateFree(Type *Ty1, Type *Ty2) const override {
564 return getTLI()->isTruncateFree(Ty1, Ty2);
565 }
566
567 bool isProfitableToHoist(Instruction *I) const override {
568 return getTLI()->isProfitableToHoist(I);
569 }
570
571 bool useAA() const override { return getST()->useAA(); }
572
573 bool isTypeLegal(Type *Ty) const override {
574 EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
575 return getTLI()->isTypeLegal(VT);
576 }
577
578 unsigned getRegUsageForType(Type *Ty) const override {
579 EVT ETy = getTLI()->getValueType(DL, Ty);
580 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
581 }
582
584 ArrayRef<const Value *> Operands, Type *AccessType,
585 TTI::TargetCostKind CostKind) const override {
586 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
587 }
588
590 const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI,
591 BlockFrequencyInfo *BFI) const override {
592 /// Try to find the estimated number of clusters. Note that the number of
593 /// clusters identified in this function could be different from the actual
594 /// numbers found in lowering. This function ignores switches that are
595 /// lowered with a mix of jump table / bit test / BTree. This function was
596 /// initially intended to be used when estimating the cost of a switch in
597 /// the inline cost heuristic, but it's a generic cost model to be used in
598 /// other places (e.g., in loop unrolling).
599 unsigned N = SI.getNumCases();
600 const TargetLoweringBase *TLI = getTLI();
601 const DataLayout &DL = this->getDataLayout();
602
603 JumpTableSize = 0;
604 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
605
606 // Early exit if neither a jump table nor a bit test is allowed.
607 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
608 return N;
609
610 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
611 APInt MinCaseVal = MaxCaseVal;
612 for (auto CI : SI.cases()) {
613 const APInt &CaseVal = CI.getCaseValue()->getValue();
614 if (CaseVal.sgt(MaxCaseVal))
615 MaxCaseVal = CaseVal;
616 if (CaseVal.slt(MinCaseVal))
617 MinCaseVal = CaseVal;
618 }
619
620 // Check if suitable for a bit test
621 if (N <= DL.getIndexSizeInBits(0u)) {
623 for (auto I : SI.cases()) {
624 const BasicBlock *BB = I.getCaseSuccessor();
625 ++DestMap[BB];
626 }
627
628 if (TLI->isSuitableForBitTests(DestMap, MinCaseVal, MaxCaseVal, DL))
629 return 1;
630 }
631
632 // Check if suitable for a jump table.
633 if (IsJTAllowed) {
634 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
635 return N;
636 uint64_t Range =
637 (MaxCaseVal - MinCaseVal)
638 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
639 // Check whether a range of clusters is dense enough for a jump table
640 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
641 JumpTableSize = Range;
642 return 1;
643 }
644 }
645 return N;
646 }
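// Illustrative example: a switch with 10 contiguous cases (0..9) that jump to
// distinct blocks is not suitable for bit tests, but (assuming jump tables
// are allowed and the default density thresholds are met) its Range of 10 is
// dense, so this returns 1 cluster with JumpTableSize = 10. A very sparse
// switch falls back to returning N, the raw number of cases.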
647
648 bool shouldBuildLookupTables() const override {
649 const TargetLoweringBase *TLI = getTLI();
650 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
651 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
652 }
653
654 bool shouldBuildRelLookupTables() const override {
655 const TargetMachine &TM = getTLI()->getTargetMachine();
656 // If non-PIC mode, do not generate a relative lookup table.
657 if (!TM.isPositionIndependent())
658 return false;
659
660 /// Relative lookup table entries consist of 32-bit offsets.
661 /// Do not generate relative lookup tables for large code models
662 /// in 64-bit architectures where 32-bit offsets might not be enough.
663 if (TM.getCodeModel() == CodeModel::Medium ||
664 TM.getCodeModel() == CodeModel::Large)
665 return false;
666
667 const Triple &TargetTriple = TM.getTargetTriple();
668 if (!TargetTriple.isArch64Bit())
669 return false;
670
671 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
672 // there.
673 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
674 return false;
675
676 return true;
677 }
678
679 bool haveFastSqrt(Type *Ty) const override {
680 const TargetLoweringBase *TLI = getTLI();
681 EVT VT = TLI->getValueType(DL, Ty);
682 return TLI->isTypeLegal(VT) &&
683 TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
684 }
685
686 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override { return true; }
687
688 InstructionCost getFPOpCost(Type *Ty) const override {
689 // Check whether FADD is available, as a proxy for floating-point in
690 // general.
691 const TargetLoweringBase *TLI = getTLI();
692 EVT VT = TLI->getValueType(DL, Ty);
693 if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
694 return TargetTransformInfo::TCC_Basic;
695 return TargetTransformInfo::TCC_Expensive;
696 }
697
699 const Function &Fn) const override {
700 switch (Inst.getOpcode()) {
701 default:
702 break;
703 case Instruction::SDiv:
704 case Instruction::SRem:
705 case Instruction::UDiv:
706 case Instruction::URem: {
707 if (!isa<ConstantInt>(Inst.getOperand(1)))
708 return false;
709 EVT VT = getTLI()->getValueType(DL, Inst.getType());
710 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());
711 }
712 };
713
714 return false;
715 }
716
717 unsigned getInliningThresholdMultiplier() const override { return 1; }
718 unsigned adjustInliningThreshold(const CallBase *CB) const override {
719 return 0;
720 }
721 unsigned getCallerAllocaCost(const CallBase *CB,
722 const AllocaInst *AI) const override {
723 return 0;
724 }
725
726 int getInlinerVectorBonusPercent() const override { return 150; }
727
730 OptimizationRemarkEmitter *ORE) const override {
731 // This unrolling functionality is target independent, but to provide some
732 // motivation for its intended use, consider x86:
733
734 // According to the Intel 64 and IA-32 Architectures Optimization Reference
735 // Manual, Intel Core models and later have a loop stream detector (and
736 // associated uop queue) that can benefit from partial unrolling.
737 // The relevant requirements are:
738 // - The loop must have no more than 4 (8 for Nehalem and later) branches
739 // taken, and none of them may be calls.
740 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
741
742 // According to the Software Optimization Guide for AMD Family 15h
743 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
744 // and loop buffer which can benefit from partial unrolling.
745 // The relevant requirements are:
746 // - The loop must have fewer than 16 branches
747 // - The loop must have less than 40 uops in all executed loop branches
748
749 // The number of taken branches in a loop is hard to estimate here, and
750 // benchmarking has revealed that it is better not to be conservative when
751 // estimating the branch count. As a result, we'll ignore the branch limits
752 // until someone finds a case where it matters in practice.
753
754 unsigned MaxOps;
755 const TargetSubtargetInfo *ST = getST();
756 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
758 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
759 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
760 else
761 return;
762
763 // Scan the loop: don't unroll loops with calls.
764 for (BasicBlock *BB : L->blocks()) {
765 for (Instruction &I : *BB) {
766 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
767 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
768 if (!thisT()->isLoweredToCall(F))
769 continue;
770 }
771
772 if (ORE) {
773 ORE->emit([&]() {
774 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
775 L->getHeader())
776 << "advising against unrolling the loop because it "
777 "contains a "
778 << ore::NV("Call", &I);
779 });
780 }
781 return;
782 }
783 }
784 }
785
786 // Enable runtime and partial unrolling up to the specified size.
787 // Enable using trip count upper bound to unroll loops.
788 UP.Partial = UP.Runtime = UP.UpperBound = true;
789 UP.PartialThreshold = MaxOps;
790
791 // Avoid unrolling when optimizing for size.
792 UP.OptSizeThreshold = 0;
794
795 // Set the number of instructions optimized away when a "back edge"
796 // becomes a "fall through" to the default value of 2.
797 UP.BEInsns = 2;
798 }
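// Illustrative example: on a subtarget whose scheduling model advertises
// LoopMicroOpBufferSize = 28, a call-free loop gets Partial, Runtime and
// UpperBound unrolling enabled with PartialThreshold = 28 (unless the
// PartialUnrollingThreshold command-line override is set), while any loop
// containing a call that is actually lowered to a call is left alone.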
799
801 TTI::PeelingPreferences &PP) const override {
802 PP.PeelCount = 0;
803 PP.AllowPeeling = true;
804 PP.AllowLoopNestsPeeling = false;
805 PP.PeelProfiledIterations = true;
806 }
807
810 HardwareLoopInfo &HWLoopInfo) const override {
811 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
812 }
813
814 unsigned getEpilogueVectorizationMinVF() const override {
816 }
817
820 }
821
823 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const override {
824 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
825 }
826
827 std::optional<Instruction *>
828 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override {
829 return BaseT::instCombineIntrinsic(IC, II);
830 }
831
832 std::optional<Value *>
834 APInt DemandedMask, KnownBits &Known,
835 bool &KnownBitsComputed) const override {
836 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
837 KnownBitsComputed);
838 }
839
841 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
842 APInt &UndefElts2, APInt &UndefElts3,
843 std::function<void(Instruction *, unsigned, APInt, APInt &)>
844 SimplifyAndSetOp) const override {
846 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
847 SimplifyAndSetOp);
848 }
849
850 std::optional<unsigned>
852 return std::optional<unsigned>(
853 getST()->getCacheSize(static_cast<unsigned>(Level)));
854 }
855
856 std::optional<unsigned>
858 std::optional<unsigned> TargetResult =
859 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
860
861 if (TargetResult)
862 return TargetResult;
863
864 return BaseT::getCacheAssociativity(Level);
865 }
866
867 unsigned getCacheLineSize() const override {
868 return getST()->getCacheLineSize();
869 }
870
871 unsigned getPrefetchDistance() const override {
872 return getST()->getPrefetchDistance();
873 }
874
875 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
876 unsigned NumStridedMemAccesses,
877 unsigned NumPrefetches,
878 bool HasCall) const override {
879 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
880 NumPrefetches, HasCall);
881 }
882
883 unsigned getMaxPrefetchIterationsAhead() const override {
884 return getST()->getMaxPrefetchIterationsAhead();
885 }
886
887 bool enableWritePrefetching() const override {
888 return getST()->enableWritePrefetching();
889 }
890
891 bool shouldPrefetchAddressSpace(unsigned AS) const override {
892 return getST()->shouldPrefetchAddressSpace(AS);
893 }
894
895 /// @}
896
897 /// \name Vector TTI Implementations
898 /// @{
899
904
905 std::optional<unsigned> getMaxVScale() const override { return std::nullopt; }
906 std::optional<unsigned> getVScaleForTuning() const override {
907 return std::nullopt;
908 }
909 bool isVScaleKnownToBeAPowerOfTwo() const override { return false; }
910
911 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
912 /// are set if the demanded result elements need to be inserted and/or
913 /// extracted from vectors.
915 VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
916 TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
917 ArrayRef<Value *> VL = {}) const override {
918 /// FIXME: a bitfield is not a reasonable abstraction for talking about
919 /// which elements are needed from a scalable vector
920 if (isa<ScalableVectorType>(InTy))
921 return InstructionCost::getInvalid();
922 auto *Ty = cast<FixedVectorType>(InTy);
923
924 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
925 (VL.empty() || VL.size() == Ty->getNumElements()) &&
926 "Vector size mismatch");
927
929
930 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
931 if (!DemandedElts[i])
932 continue;
933 if (Insert) {
934 Value *InsertedVal = VL.empty() ? nullptr : VL[i];
935 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
936 CostKind, i, nullptr, InsertedVal);
937 }
938 if (Extract)
939 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
940 CostKind, i, nullptr, nullptr);
941 }
942
943 return Cost;
944 }
945
947 return false;
948 }
949
950 bool
952 unsigned ScalarOpdIdx) const override {
953 return false;
954 }
955
957 int OpdIdx) const override {
958 return OpdIdx == -1;
959 }
960
961 bool
963 int RetIdx) const override {
964 return RetIdx == 0;
965 }
966
967 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
969 bool Extract,
971 if (isa<ScalableVectorType>(InTy))
972 return InstructionCost::getInvalid();
973 auto *Ty = cast<FixedVectorType>(InTy);
974
975 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
976 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
977 CostKind);
978 }
979
980 /// Estimate the overhead of scalarizing an instruction's
981 /// operands. The (potentially vector) types to use for each
982 /// argument are passed via Tys.
984 ArrayRef<Type *> Tys, TTI::TargetCostKind CostKind) const override {
986 for (Type *Ty : Tys) {
987 // Disregard things like metadata arguments.
988 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
989 !Ty->isPtrOrPtrVectorTy())
990 continue;
991
992 if (auto *VecTy = dyn_cast<VectorType>(Ty))
993 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
994 /*Extract*/ true, CostKind);
995 }
996
997 return Cost;
998 }
999
1000 /// Estimate the overhead of scalarizing the inputs and outputs of an
1001 /// instruction, with return type RetTy and arguments Args of type Tys. If
1002 /// Args are unknown (empty), then the cost associated with one argument is
1003 /// added as a heuristic.
1006 ArrayRef<Type *> Tys,
1009 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
1010 if (!Args.empty())
1012 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);
1013 else
1014 // When no information on arguments is provided, we add the cost
1015 // associated with one argument as a heuristic.
1016 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
1017 /*Extract*/ true, CostKind);
1018
1019 return Cost;
1020 }
1021
1022 /// Estimate the cost of type-legalization and the legalized type.
1023 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
1024 LLVMContext &C = Ty->getContext();
1025 EVT MTy = getTLI()->getValueType(DL, Ty);
1026
1027 InstructionCost Cost = 1;
1028 // We keep legalizing the type until we find a legal kind. We assume that
1029 // the only operation that costs anything is the split. After splitting
1030 // we need to handle two types.
1031 while (true) {
1032 TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
1033
1035 // Ensure we return a sensible simple VT here, since many callers of
1036 // this function require it.
1037 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
1038 return std::make_pair(InstructionCost::getInvalid(), VT);
1039 }
1040
1041 if (LK.first == TargetLoweringBase::TypeLegal)
1042 return std::make_pair(Cost, MTy.getSimpleVT());
1043
1044 if (LK.first == TargetLoweringBase::TypeSplitVector ||
1046 Cost *= 2;
1047
1048 // Do not loop with f128 type.
1049 if (MTy == LK.second)
1050 return std::make_pair(Cost, MTy.getSimpleVT());
1051
1052 // Keep legalizing the type.
1053 MTy = LK.second;
1054 }
1055 }
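// Illustrative example: on a target whose widest legal vector is 128 bits,
// a <16 x i32> (512-bit) type is split twice (v16i32 -> v8i32 -> v4i32),
// doubling the cost at each step, so this returns {4, v4i32}.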
1056
1057 unsigned getMaxInterleaveFactor(ElementCount VF) const override { return 1; }
1058
1060 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1063 ArrayRef<const Value *> Args = {},
1064 const Instruction *CxtI = nullptr) const override {
1065 // Check if any of the operands are vector operands.
1066 const TargetLoweringBase *TLI = getTLI();
1067 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1068 assert(ISD && "Invalid opcode");
1069
1070 // TODO: Handle more cost kinds.
1072 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1073 Opd1Info, Opd2Info,
1074 Args, CxtI);
1075
1076 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1077
1078 bool IsFloat = Ty->isFPOrFPVectorTy();
1079 // Assume that floating point arithmetic operations cost twice as much as
1080 // integer operations.
1081 InstructionCost OpCost = (IsFloat ? 2 : 1);
1082
1083 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
1084 // The operation is legal. Assume it costs 1.
1085 // TODO: Once we have extract/insert subvector cost we need to use them.
1086 return LT.first * OpCost;
1087 }
1088
1089 if (!TLI->isOperationExpand(ISD, LT.second)) {
1090 // If the operation is custom lowered, then assume that the code is twice
1091 // as expensive.
1092 return LT.first * 2 * OpCost;
1093 }
1094
1095 // An 'Expand' of URem and SRem is special because it may default
1096 // to expanding the operation into a sequence of sub-operations
1097 // i.e. X % Y -> X-(X/Y)*Y.
1098 if (ISD == ISD::UREM || ISD == ISD::SREM) {
1099 bool IsSigned = ISD == ISD::SREM;
1100 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
1101 LT.second) ||
1102 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
1103 LT.second)) {
1104 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
1105 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
1106 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
1107 InstructionCost MulCost =
1108 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
1109 InstructionCost SubCost =
1110 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
1111 return DivCost + MulCost + SubCost;
1112 }
1113 }
1114
1115 // We cannot scalarize scalable vectors, so return Invalid.
1116 if (isa<ScalableVectorType>(Ty))
1117 return InstructionCost::getInvalid();
1118
1119 // Else, assume that we need to scalarize this op.
1120 // TODO: If one of the types get legalized by splitting, handle this
1121 // similarly to what getCastInstrCost() does.
1122 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1123 InstructionCost Cost = thisT()->getArithmeticInstrCost(
1124 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
1125 Args, CxtI);
1126 // Return the cost of multiple scalar invocations plus the cost of
1127 // inserting and extracting the values.
1128 SmallVector<Type *> Tys(Args.size(), Ty);
1129 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1130 VTy->getNumElements() * Cost;
1131 }
1132
1133 // We don't know anything about this scalar instruction.
1134 return OpCost;
1135 }
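// Illustrative examples: an integer add on a legal type returns LT.first * 1;
// an SREM that must be expanded but whose SDIV is legal is costed as
// X - (X/Y)*Y, i.e. div + mul + sub; and a fixed-width vector op with no
// legal lowering is costed as NumElements * scalar-op cost plus the
// insert/extract scalarization overhead.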
1136
1138 ArrayRef<int> Mask,
1139 VectorType *SrcTy, int &Index,
1140 VectorType *&SubTy) const {
1141 if (Mask.empty())
1142 return Kind;
1143 int NumDstElts = Mask.size();
1144 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();
1145 switch (Kind) {
1147 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
1148 return TTI::SK_Reverse;
1149 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
1150 return TTI::SK_Broadcast;
1151 if (isSplatMask(Mask, NumSrcElts, Index))
1152 return TTI::SK_Broadcast;
1153 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
1154 (Index + NumDstElts) <= NumSrcElts) {
1155 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumDstElts);
1157 }
1158 break;
1159 }
1160 case TTI::SK_PermuteTwoSrc: {
1161 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))
1163 Index, SubTy);
1164 int NumSubElts;
1165 if (NumDstElts > 2 && ShuffleVectorInst::isInsertSubvectorMask(
1166 Mask, NumSrcElts, NumSubElts, Index)) {
1167 if (Index + NumSubElts > NumSrcElts)
1168 return Kind;
1169 SubTy = FixedVectorType::get(SrcTy->getElementType(), NumSubElts);
1171 }
1172 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
1173 return TTI::SK_Select;
1174 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
1175 return TTI::SK_Transpose;
1176 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
1177 return TTI::SK_Splice;
1178 break;
1179 }
1180 case TTI::SK_Select:
1181 case TTI::SK_Reverse:
1182 case TTI::SK_Broadcast:
1183 case TTI::SK_Transpose:
1186 case TTI::SK_Splice:
1187 break;
1188 }
1189 return Kind;
1190 }
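// Illustrative example: a SK_PermuteSingleSrc mask <4,5,6,7> on an <8 x i32>
// source is recognized as SK_ExtractSubvector at Index 4 with a <4 x i32>
// subvector type, and a two-source permute whose mask only references the
// first source is re-classified as a single-source permute.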
1191
1195 VectorType *SubTp, ArrayRef<const Value *> Args = {},
1196 const Instruction *CxtI = nullptr) const override {
1197 switch (improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTp)) {
1198 case TTI::SK_Broadcast:
1199 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1200 return getBroadcastShuffleOverhead(FVT, CostKind);
1202 case TTI::SK_Select:
1203 case TTI::SK_Splice:
1204 case TTI::SK_Reverse:
1205 case TTI::SK_Transpose:
1208 if (auto *FVT = dyn_cast<FixedVectorType>(SrcTy))
1209 return getPermuteShuffleOverhead(FVT, CostKind);
1212 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,
1213 cast<FixedVectorType>(SubTp));
1215 return getInsertSubvectorOverhead(DstTy, CostKind, Index,
1216 cast<FixedVectorType>(SubTp));
1217 }
1218 llvm_unreachable("Unknown TTI::ShuffleKind");
1219 }
1220
1222 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1224 const Instruction *I = nullptr) const override {
1225 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1226 return 0;
1227
1228 const TargetLoweringBase *TLI = getTLI();
1229 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1230 assert(ISD && "Invalid opcode");
1231 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1232 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1233
1234 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1235 TypeSize DstSize = DstLT.second.getSizeInBits();
1236 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1237 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1238
1239 switch (Opcode) {
1240 default:
1241 break;
1242 case Instruction::Trunc:
1243 // Check for NOOP conversions.
1244 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1245 return 0;
1246 [[fallthrough]];
1247 case Instruction::BitCast:
1248 // Bitcasts between types that are legalized to the same type are free, and
1249 // an int to/from ptr cast of the same size is also assumed to be free.
1250 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1251 SrcSize == DstSize)
1252 return 0;
1253 break;
1254 case Instruction::FPExt:
1255 if (I && getTLI()->isExtFree(I))
1256 return 0;
1257 break;
1258 case Instruction::ZExt:
1259 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1260 return 0;
1261 [[fallthrough]];
1262 case Instruction::SExt:
1263 if (I && getTLI()->isExtFree(I))
1264 return 0;
1265
1266 // If this is a zext/sext of a load, return 0 if the corresponding
1267 // extending load exists on the target and the result type is legal.
1268 if (CCH == TTI::CastContextHint::Normal) {
1269 EVT ExtVT = EVT::getEVT(Dst);
1270 EVT LoadVT = EVT::getEVT(Src);
1271 unsigned LType =
1272 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1273 if (DstLT.first == SrcLT.first &&
1274 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1275 return 0;
1276 }
1277 break;
1278 case Instruction::AddrSpaceCast:
1279 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1280 Dst->getPointerAddressSpace()))
1281 return 0;
1282 break;
1283 }
1284
1285 auto *SrcVTy = dyn_cast<VectorType>(Src);
1286 auto *DstVTy = dyn_cast<VectorType>(Dst);
1287
1288 // If the cast is marked as legal (or promote) then assume low cost.
1289 if (SrcLT.first == DstLT.first &&
1290 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1291 return SrcLT.first;
1292
1293 // Handle scalar conversions.
1294 if (!SrcVTy && !DstVTy) {
1295 // Just check the op cost. If the operation is legal then assume it costs
1296 // 1.
1297 if (!TLI->isOperationExpand(ISD, DstLT.second))
1298 return 1;
1299
1300 // Assume that illegal scalar instructions are expensive.
1301 return 4;
1302 }
1303
1304 // Check vector-to-vector casts.
1305 if (DstVTy && SrcVTy) {
1306 // If the cast is between same-sized registers, then the check is simple.
1307 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1308
1309 // Assume that Zext is done using AND.
1310 if (Opcode == Instruction::ZExt)
1311 return SrcLT.first;
1312
1313 // Assume that sext is done using SHL and SRA.
1314 if (Opcode == Instruction::SExt)
1315 return SrcLT.first * 2;
1316
1317 // Just check the op cost. If the operation is legal then assume it
1318 // costs
1319 // 1 and multiply by the type-legalization overhead.
1320 if (!TLI->isOperationExpand(ISD, DstLT.second))
1321 return SrcLT.first * 1;
1322 }
1323
1324 // If we are legalizing by splitting, query the concrete TTI for the cost
1325 // of casting the original vector twice. We also need to factor in the
1326 // cost of the split itself. Count that as 1, to be consistent with
1327 // getTypeLegalizationCost().
1328 bool SplitSrc =
1329 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1331 bool SplitDst =
1332 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1334 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1335 DstVTy->getElementCount().isVector()) {
1336 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1337 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1338 const T *TTI = thisT();
1339 // If both types need to be split then the split is free.
1340 InstructionCost SplitCost =
1341 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1342 return SplitCost +
1343 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1344 CostKind, I));
1345 }
1346
1347 // Scalarization cost is Invalid, can't assume any num elements.
1348 if (isa<ScalableVectorType>(DstVTy))
1349 return InstructionCost::getInvalid();
1350
1351 // In other cases where the source or destination are illegal, assume
1352 // the operation will get scalarized.
1353 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1354 InstructionCost Cost = thisT()->getCastInstrCost(
1355 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1356
1357 // Return the cost of multiple scalar invocations plus the cost of
1358 // inserting and extracting the values.
1359 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1360 CostKind) +
1361 Num * Cost;
1362 }
1363
1364 // We already handled vector-to-vector and scalar-to-scalar conversions.
1365 // This
1366 // is where we handle bitcasts between vectors and scalars. We need to assume
1367 // that the conversion is scalarized in one way or another.
1368 if (Opcode == Instruction::BitCast) {
1369 // Illegal bitcasts are done by storing and loading from a stack slot.
1370 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1371 /*Extract*/ true, CostKind)
1372 : 0) +
1373 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1374 /*Extract*/ false, CostKind)
1375 : 0);
1376 }
1377
1378 llvm_unreachable("Unhandled cast");
1379 }
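// Illustrative examples: a zext whose source is a load is free when the
// target has a legal ZEXTLOAD for that type combination; a scalar cast that
// must be expanded is assumed to cost 4; and a vector cast whose operands
// legalize by splitting recurses on the half-width vectors plus a split cost.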
1380
1382 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1383 unsigned Index,
1384 TTI::TargetCostKind CostKind) const override {
1385 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1386 CostKind, Index, nullptr, nullptr) +
1387 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1389 }
1390
1393 const Instruction *I = nullptr) const override {
1394 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1395 }
1396
1398 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1402 const Instruction *I = nullptr) const override {
1403 const TargetLoweringBase *TLI = getTLI();
1404 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1405 assert(ISD && "Invalid opcode");
1406
1407 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)
1408 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1409 Op1Info, Op2Info, I);
1410
1411 // Selects on vectors are actually vector selects.
1412 if (ISD == ISD::SELECT) {
1413 assert(CondTy && "CondTy must exist");
1414 if (CondTy->isVectorTy())
1415 ISD = ISD::VSELECT;
1416 }
1417 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1418
1419 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1420 !TLI->isOperationExpand(ISD, LT.second)) {
1421 // The operation is legal. Assume it costs 1. Multiply
1422 // by the type-legalization overhead.
1423 return LT.first * 1;
1424 }
1425
1426 // Otherwise, assume that the cast is scalarized.
1427 // TODO: If one of the types get legalized by splitting, handle this
1428 // similarly to what getCastInstrCost() does.
1429 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1430 if (isa<ScalableVectorType>(ValTy))
1431 return InstructionCost::getInvalid();
1432
1433 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1434 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1435 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,
1436 CostKind, Op1Info, Op2Info, I);
1437
1438 // Return the cost of multiple scalar invocations plus the cost of
1439 // inserting and extracting the values.
1440 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1441 /*Extract*/ false, CostKind) +
1442 Num * Cost;
1443 }
1444
1445 // Unknown scalar opcode.
1446 return 1;
1447 }
1448
1451 unsigned Index, const Value *Op0,
1452 const Value *Op1) const override {
1453 return getRegUsageForType(Val->getScalarType());
1454 }
1455
1456 /// \param ScalarUserAndIdx encodes the information about extracts from a
1457 /// vector, with 'Scalar' being the value being extracted, 'User' being the
1458 /// user of the extract (nullptr if the user is not known before
1459 /// vectorization) and 'Idx' being the extract lane.
1462 unsigned Index, Value *Scalar,
1463 ArrayRef<std::tuple<Value *, User *, int>>
1464 ScalarUserAndIdx) const override {
1465 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
1466 nullptr);
1467 }
1468
1471 unsigned Index) const override {
1472 Value *Op0 = nullptr;
1473 Value *Op1 = nullptr;
1474 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1475 Op0 = IE->getOperand(0);
1476 Op1 = IE->getOperand(1);
1477 }
1478 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1479 Op1);
1480 }
1481
1485 unsigned Index) const override {
1486 unsigned NewIndex = -1;
1487 if (auto *FVTy = dyn_cast<FixedVectorType>(Val)) {
1488 assert(Index < FVTy->getNumElements() &&
1489 "Unexpected index from end of vector");
1490 NewIndex = FVTy->getNumElements() - 1 - Index;
1491 }
1492 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,
1493 nullptr);
1494 }
1495
1497 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
1498 const APInt &DemandedDstElts,
1499 TTI::TargetCostKind CostKind) const override {
1500 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1501 "Unexpected size of DemandedDstElts.");
1502
1504
1505 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1506 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1507
1508 // The Mask shuffling cost is to extract all the elements of the Mask
1509 // and insert each of them Factor times into the wide vector:
1510 //
1511 // E.g. an interleaved group with factor 3:
1512 // %mask = icmp ult <8 x i32> %vec1, %vec2
1513 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1514 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1515 // The cost is estimated as extract all mask elements from the <8xi1> mask
1516 // vector and insert them factor times into the <24xi1> shuffled mask
1517 // vector.
1518 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1519 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1520 /*Insert*/ false,
1521 /*Extract*/ true, CostKind);
1522 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1523 /*Insert*/ true,
1524 /*Extract*/ false, CostKind);
1525
1526 return Cost;
1527 }
1528
1530 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1533 const Instruction *I = nullptr) const override {
1534 assert(!Src->isVoidTy() && "Invalid type");
1535 // Assume types, such as structs, are expensive.
1536 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1537 return 4;
1538 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1539
1540 // Assuming that all loads of legal types cost 1.
1541 InstructionCost Cost = LT.first;
1543 return Cost;
1544
1545 const DataLayout &DL = this->getDataLayout();
1546 if (Src->isVectorTy() &&
1547 // In practice it's not currently possible to have a change in lane
1548 // length for extending loads or truncating stores so both types should
1549 // have the same scalable property.
1550 TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
1551 LT.second.getSizeInBits())) {
1552 // This is a vector load that legalizes to a larger type than the vector
1553 // itself. Unless the corresponding extending load or truncating store is
1554 // legal, then this will scalarize.
1556 EVT MemVT = getTLI()->getValueType(DL, Src);
1557 if (Opcode == Instruction::Store)
1558 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1559 else
1560 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1561
1562 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1563 // This is a vector load/store for some illegal type that is scalarized.
1564 // We must account for the cost of building or decomposing the vector.
1566 cast<VectorType>(Src), Opcode != Instruction::Store,
1567 Opcode == Instruction::Store, CostKind);
1568 }
1569 }
1570
1571 return Cost;
1572 }
1573
1575 getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment,
1576 unsigned AddressSpace,
1577 TTI::TargetCostKind CostKind) const override {
1578 // TODO: Pass on AddressSpace when we have test coverage.
1579 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1580 CostKind);
1581 }
1582
1584 getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
1585 bool VariableMask, Align Alignment,
1587 const Instruction *I = nullptr) const override {
1588 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1589 true, CostKind);
1590 }
1591
1593 getExpandCompressMemoryOpCost(unsigned Opcode, Type *DataTy,
1594 bool VariableMask, Align Alignment,
1596 const Instruction *I = nullptr) const override {
1597 // Treat expand load/compress store as gather/scatter operation.
1598 // TODO: implement more precise cost estimation for these intrinsics.
1599 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1600 /*IsGatherScatter*/ true, CostKind);
1601 }
1602
1604 const Value *Ptr, bool VariableMask,
1605 Align Alignment,
1607 const Instruction *I) const override {
1608 // For a target without strided memory operations (or for an illegal
1609 // operation type on one which does), assume we lower to a gather/scatter
1610 // operation. (Which may in turn be scalarized.)
1611 return thisT()->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1612 Alignment, CostKind, I);
1613 }
1614
1616 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1617 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1618 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {
1619
1620 // We cannot scalarize scalable vectors, so return Invalid.
1621 if (isa<ScalableVectorType>(VecTy))
1622 return InstructionCost::getInvalid();
1623
1624 auto *VT = cast<FixedVectorType>(VecTy);
1625
1626 unsigned NumElts = VT->getNumElements();
1627 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1628
1629 unsigned NumSubElts = NumElts / Factor;
1630 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1631
1632 // Firstly, the cost of load/store operation.
1633 InstructionCost Cost;
1634 if (UseMaskForCond || UseMaskForGaps)
1635 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1636 AddressSpace, CostKind);
1637 else
1638 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1639 CostKind);
1640
1641 // Legalize the vector type, and get the legalized and unlegalized type
1642 // sizes.
1643 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1644 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1645 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1646
1647 // Scale the cost of the memory operation by the fraction of legalized
1648 // instructions that will actually be used. We shouldn't account for the
1649 // cost of dead instructions since they will be removed.
1650 //
1651 // E.g., An interleaved load of factor 8:
1652 // %vec = load <16 x i64>, <16 x i64>* %ptr
1653 // %v0 = shufflevector %vec, undef, <0, 8>
1654 //
1655 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1656 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1657 // type). The other loads are unused.
1658 //
1659 // TODO: Note that legalization can turn masked loads/stores into unmasked
1660 // (legalized) loads/stores. This can be reflected in the cost.
1661 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1662 // The number of loads of a legal type it will take to represent a load
1663 // of the unlegalized vector type.
1664 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1665
1666 // The number of elements of the unlegalized type that correspond to a
1667 // single legal instruction.
1668 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1669
1670 // Determine which legal instructions will be used.
1671 BitVector UsedInsts(NumLegalInsts, false);
1672 for (unsigned Index : Indices)
1673 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1674 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1675
1676 // Scale the cost of the load by the fraction of legal instructions that
1677 // will be used.
1678 Cost = divideCeil(UsedInsts.count() * Cost.getValue(), NumLegalInsts);
1679 }
1680
1681 // Then plus the cost of interleave operation.
1682 assert(Indices.size() <= Factor &&
1683 "Interleaved memory op has too many members");
1684
1685 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1686 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1687
1688 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1689 for (unsigned Index : Indices) {
1690 assert(Index < Factor && "Invalid index for interleaved memory op");
1691 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1692 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1693 }
1694
1695 if (Opcode == Instruction::Load) {
1696 // The interleave cost is similar to extracting the sub vectors' elements
1697 // from the wide vector and inserting them into the sub vectors.
1698 //
1699 // E.g. An interleaved load of factor 2 (with one member of index 0):
1700 // %vec = load <8 x i32>, <8 x i32>* %ptr
1701 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1702 // The cost is estimated as extract elements at 0, 2, 4, 6 from the
1703 // <8 x i32> vector and insert them into a <4 x i32> vector.
1704 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1705 SubVT, DemandedAllSubElts,
1706 /*Insert*/ true, /*Extract*/ false, CostKind);
1707 Cost += Indices.size() * InsSubCost;
1708 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1709 /*Insert*/ false,
1710 /*Extract*/ true, CostKind);
1711 } else {
1712 // The interleave cost is to extract elements from the sub vectors and
1713 // insert them into the wide vector.
1714 //
1715 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1716 // (using VF=4):
1717 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1718 // %gaps.mask = <true, true, false, true, true, false,
1719 // true, true, false, true, true, false>
1720 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1721 // i32 Align, <12 x i1> %gaps.mask
 1722       // The cost is estimated as extracting all elements (of actual members,
 1723       // excluding gaps) from both <4 x i32> vectors and inserting them into
 1724       // the <12 x i32> vector.
1725 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1726 SubVT, DemandedAllSubElts,
1727 /*Insert*/ false, /*Extract*/ true, CostKind);
1728 Cost += ExtSubCost * Indices.size();
1729 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1730 /*Insert*/ true,
1731 /*Extract*/ false, CostKind);
1732 }
1733
1734 if (!UseMaskForCond)
1735 return Cost;
1736
1737 Type *I8Type = Type::getInt8Ty(VT->getContext());
1738
1739 Cost += thisT()->getReplicationShuffleCost(
1740 I8Type, Factor, NumSubElts,
1741 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1742 CostKind);
1743
1744 // The Gaps mask is invariant and created outside the loop, therefore the
1745 // cost of creating it is not accounted for here. However if we have both
1746 // a MaskForGaps and some other mask that guards the execution of the
1747 // memory access, we need to account for the cost of And-ing the two masks
1748 // inside the loop.
1749 if (UseMaskForGaps) {
1750 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1751 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1752 CostKind);
1753 }
1754
1755 return Cost;
1756 }
1757
1758 /// Get intrinsic cost based on arguments.
1761 TTI::TargetCostKind CostKind) const override {
1762 // Check for generically free intrinsics.
1764 return 0;
1765
1766 // Assume that target intrinsics are cheap.
1767 Intrinsic::ID IID = ICA.getID();
1770
1771 // VP Intrinsics should have the same cost as their non-vp counterpart.
1772 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1773 // counterpart when the vector length argument is smaller than the maximum
1774 // vector length.
1775 // TODO: Support other kinds of VPIntrinsics
1776 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1777 std::optional<unsigned> FOp =
1779 if (FOp) {
1780 if (ICA.getID() == Intrinsic::vp_load) {
1781 Align Alignment;
1782 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1783 Alignment = VPI->getPointerAlignment().valueOrOne();
1784 unsigned AS = 0;
1785 if (ICA.getArgTypes().size() > 1)
1786 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[0]))
1787 AS = PtrTy->getAddressSpace();
1788 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1789 AS, CostKind);
1790 }
1791 if (ICA.getID() == Intrinsic::vp_store) {
1792 Align Alignment;
1793 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1794 Alignment = VPI->getPointerAlignment().valueOrOne();
1795 unsigned AS = 0;
1796 if (ICA.getArgTypes().size() >= 2)
1797 if (auto *PtrTy = dyn_cast<PointerType>(ICA.getArgTypes()[1]))
1798 AS = PtrTy->getAddressSpace();
1799 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,
1800 AS, CostKind);
1801 }
1803 ICA.getID() == Intrinsic::vp_fneg) {
1804 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1805 CostKind);
1806 }
1807 if (VPCastIntrinsic::isVPCast(ICA.getID())) {
1808 return thisT()->getCastInstrCost(
1809 *FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
1811 }
1812 if (VPCmpIntrinsic::isVPCmp(ICA.getID())) {
1813 // We can only handle vp_cmp intrinsics with underlying instructions.
1814 if (ICA.getInst()) {
1815 assert(FOp);
1816 auto *UI = cast<VPCmpIntrinsic>(ICA.getInst());
1817 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],
1818 ICA.getReturnType(),
1819 UI->getPredicate(), CostKind);
1820 }
1821 }
1822 }
1823
1824 if (ICA.getID() == Intrinsic::vp_scatter) {
1825 if (ICA.isTypeBasedOnly()) {
1826 IntrinsicCostAttributes MaskedScatter(
1829 ICA.getFlags());
1830 return getTypeBasedIntrinsicInstrCost(MaskedScatter, CostKind);
1831 }
1832 Align Alignment;
1833 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1834 Alignment = VPI->getPointerAlignment().valueOrOne();
1835 bool VarMask = isa<Constant>(ICA.getArgs()[2]);
1836 return thisT()->getGatherScatterOpCost(
1837 Instruction::Store, ICA.getArgTypes()[0], ICA.getArgs()[1], VarMask,
1838 Alignment, CostKind, nullptr);
1839 }
1840 if (ICA.getID() == Intrinsic::vp_gather) {
1841 if (ICA.isTypeBasedOnly()) {
1842 IntrinsicCostAttributes MaskedGather(
1845 ICA.getFlags());
1846 return getTypeBasedIntrinsicInstrCost(MaskedGather, CostKind);
1847 }
1848 Align Alignment;
1849 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1850 Alignment = VPI->getPointerAlignment().valueOrOne();
1851 bool VarMask = isa<Constant>(ICA.getArgs()[1]);
1852 return thisT()->getGatherScatterOpCost(
1853 Instruction::Load, ICA.getReturnType(), ICA.getArgs()[0], VarMask,
1854 Alignment, CostKind, nullptr);
1855 }
1856
1857 if (ICA.getID() == Intrinsic::vp_select ||
1858 ICA.getID() == Intrinsic::vp_merge) {
1859 TTI::OperandValueInfo OpInfoX, OpInfoY;
1860 if (!ICA.isTypeBasedOnly()) {
1861 OpInfoX = TTI::getOperandInfo(ICA.getArgs()[0]);
1862 OpInfoY = TTI::getOperandInfo(ICA.getArgs()[1]);
1863 }
1864 return getCmpSelInstrCost(
1865 Instruction::Select, ICA.getReturnType(), ICA.getArgTypes()[0],
1866 CmpInst::BAD_ICMP_PREDICATE, CostKind, OpInfoX, OpInfoY);
1867 }
1868
1869 std::optional<Intrinsic::ID> FID =
1871
1872 // Not functionally equivalent but close enough for cost modelling.
1873 if (ICA.getID() == Intrinsic::experimental_vp_reverse)
1874 FID = Intrinsic::vector_reverse;
1875
1876 if (FID) {
1877 // Non-vp version will have same arg types except mask and vector
1878 // length.
1879 assert(ICA.getArgTypes().size() >= 2 &&
1880 "Expected VPIntrinsic to have Mask and Vector Length args and "
1881 "types");
1882
1883 ArrayRef<const Value *> NewArgs = ArrayRef(ICA.getArgs());
1884 if (!ICA.isTypeBasedOnly())
1885 NewArgs = NewArgs.drop_back(2);
1887
1888 // VPReduction intrinsics have a start value argument that their non-vp
1889 // counterparts do not have, except for the fadd and fmul non-vp
 1890       // counterparts.
1892 *FID != Intrinsic::vector_reduce_fadd &&
1893 *FID != Intrinsic::vector_reduce_fmul) {
1894 if (!ICA.isTypeBasedOnly())
1895 NewArgs = NewArgs.drop_front();
1896 NewTys = NewTys.drop_front();
1897 }
1898
1899 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewArgs,
1900 NewTys, ICA.getFlags());
1901 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1902 }
1903 }
1904
1905 if (ICA.isTypeBasedOnly())
1907
1908 Type *RetTy = ICA.getReturnType();
1909
1910 ElementCount RetVF = isVectorizedTy(RetTy) ? getVectorizedTypeVF(RetTy)
1912
1913 const IntrinsicInst *I = ICA.getInst();
1914 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1915 FastMathFlags FMF = ICA.getFlags();
1916 switch (IID) {
1917 default:
1918 break;
1919
1920 case Intrinsic::powi:
1921 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1922 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1923 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1924 ShouldOptForSize)) {
1925 // The cost is modeled on the expansion performed by ExpandPowI in
1926 // SelectionDAGBuilder.
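// For example, for powi(X, 11): |11| = 0b1011 has 4 active bits and a
// popcount of 3, so the repeated-squaring expansion is costed as
// (4 + 3 - 2) = 5 fmuls (three squarings plus two combining multiplies);
// a negative exponent adds one fdiv for the final reciprocal.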
1927 APInt Exponent = RHSC->getValue().abs();
1928 unsigned ActiveBits = Exponent.getActiveBits();
1929 unsigned PopCount = Exponent.popcount();
1930 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1931 thisT()->getArithmeticInstrCost(
1932 Instruction::FMul, RetTy, CostKind);
1933 if (RHSC->isNegative())
1934 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1935 CostKind);
1936 return Cost;
1937 }
1938 }
1939 break;
1940 case Intrinsic::cttz:
1941 // FIXME: If necessary, this should go in target-specific overrides.
1942 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1944 break;
1945
1946 case Intrinsic::ctlz:
1947 // FIXME: If necessary, this should go in target-specific overrides.
1948 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1950 break;
1951
1952 case Intrinsic::memcpy:
1953 return thisT()->getMemcpyCost(ICA.getInst());
1954
1955 case Intrinsic::masked_scatter: {
1956 const Value *Mask = Args[2];
1957 bool VarMask = !isa<Constant>(Mask);
1958 Align Alignment = I->getParamAlign(1).valueOrOne();
1959 return thisT()->getGatherScatterOpCost(Instruction::Store,
1960 ICA.getArgTypes()[0], Args[1],
1961 VarMask, Alignment, CostKind, I);
1962 }
1963 case Intrinsic::masked_gather: {
1964 const Value *Mask = Args[1];
1965 bool VarMask = !isa<Constant>(Mask);
1966 Align Alignment = I->getParamAlign(0).valueOrOne();
1967 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1968 VarMask, Alignment, CostKind, I);
1969 }
1970 case Intrinsic::masked_compressstore: {
1971 const Value *Data = Args[0];
1972 const Value *Mask = Args[2];
1973 Align Alignment = I->getParamAlign(1).valueOrOne();
1974 return thisT()->getExpandCompressMemoryOpCost(
1975 Instruction::Store, Data->getType(), !isa<Constant>(Mask), Alignment,
1976 CostKind, I);
1977 }
1978 case Intrinsic::masked_expandload: {
1979 const Value *Mask = Args[1];
1980 Align Alignment = I->getParamAlign(0).valueOrOne();
1981 return thisT()->getExpandCompressMemoryOpCost(Instruction::Load, RetTy,
1982 !isa<Constant>(Mask),
1983 Alignment, CostKind, I);
1984 }
1985 case Intrinsic::experimental_vp_strided_store: {
1986 const Value *Data = Args[0];
1987 const Value *Ptr = Args[1];
1988 const Value *Mask = Args[3];
1989 const Value *EVL = Args[4];
1990 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
1991 Type *EltTy = cast<VectorType>(Data->getType())->getElementType();
1992 Align Alignment =
1993 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));
1994 return thisT()->getStridedMemoryOpCost(Instruction::Store,
1995 Data->getType(), Ptr, VarMask,
1996 Alignment, CostKind, I);
1997 }
1998 case Intrinsic::experimental_vp_strided_load: {
1999 const Value *Ptr = Args[0];
2000 const Value *Mask = Args[2];
2001 const Value *EVL = Args[3];
2002 bool VarMask = !isa<Constant>(Mask) || !isa<Constant>(EVL);
2003 Type *EltTy = cast<VectorType>(RetTy)->getElementType();
2004 Align Alignment =
2005 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));
2006 return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
2007 VarMask, Alignment, CostKind, I);
2008 }
2009 case Intrinsic::stepvector: {
2010 if (isa<ScalableVectorType>(RetTy))
2012 // The cost of materialising a constant integer vector.
2014 }
2015 case Intrinsic::vector_extract: {
2016 // FIXME: Handle case where a scalable vector is extracted from a scalable
2017 // vector
2018 if (isa<ScalableVectorType>(RetTy))
2020 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
2021 return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
2022 cast<VectorType>(RetTy),
2023 cast<VectorType>(Args[0]->getType()), {},
2024 CostKind, Index, cast<VectorType>(RetTy));
2025 }
2026 case Intrinsic::vector_insert: {
2027 // FIXME: Handle case where a scalable vector is inserted into a scalable
2028 // vector
2029 if (isa<ScalableVectorType>(Args[1]->getType()))
2031 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2032 return thisT()->getShuffleCost(
2034 cast<VectorType>(Args[0]->getType()), {}, CostKind, Index,
2035 cast<VectorType>(Args[1]->getType()));
2036 }
2037 case Intrinsic::vector_splice: {
2038 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
2039 return thisT()->getShuffleCost(TTI::SK_Splice, cast<VectorType>(RetTy),
2040 cast<VectorType>(Args[0]->getType()), {},
2041 CostKind, Index, cast<VectorType>(RetTy));
2042 }
2043 case Intrinsic::vector_reduce_add:
2044 case Intrinsic::vector_reduce_mul:
2045 case Intrinsic::vector_reduce_and:
2046 case Intrinsic::vector_reduce_or:
2047 case Intrinsic::vector_reduce_xor:
2048 case Intrinsic::vector_reduce_smax:
2049 case Intrinsic::vector_reduce_smin:
2050 case Intrinsic::vector_reduce_fmax:
2051 case Intrinsic::vector_reduce_fmin:
2052 case Intrinsic::vector_reduce_fmaximum:
2053 case Intrinsic::vector_reduce_fminimum:
2054 case Intrinsic::vector_reduce_umax:
2055 case Intrinsic::vector_reduce_umin: {
2056 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
2058 }
2059 case Intrinsic::vector_reduce_fadd:
2060 case Intrinsic::vector_reduce_fmul: {
2062 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
2064 }
2065 case Intrinsic::fshl:
2066 case Intrinsic::fshr: {
2067 const Value *X = Args[0];
2068 const Value *Y = Args[1];
2069 const Value *Z = Args[2];
2072 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
2073
2074 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2075 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
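// For example, an i32 fshl with a variable shift amount is costed as an
// or, a sub, a shl and a lshr, plus an 'and' for the Z % 32 step (32 is a
// power of two); the icmp/select pair below is only added for true funnel
// shifts (X != Y), since rotates need no shift-by-zero guard.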
2077 Cost +=
2078 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2079 Cost +=
2080 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2081 Cost += thisT()->getArithmeticInstrCost(
2082 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
2083 {OpInfoZ.Kind, TTI::OP_None});
2084 Cost += thisT()->getArithmeticInstrCost(
2085 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
2086 {OpInfoZ.Kind, TTI::OP_None});
 2087       // Non-constant shift amounts require a modulo. If the type size is a
 2088       // power of 2 then this will be converted to an 'and', otherwise it will
 2089       // use a 'urem'.
2090 if (!OpInfoZ.isConstant())
2091 Cost += thisT()->getArithmeticInstrCost(
2092 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2093 : BinaryOperator::URem,
2094 RetTy, CostKind, OpInfoZ,
2095 {TTI::OK_UniformConstantValue, TTI::OP_None});
2096 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
2097 if (X != Y) {
2098 Type *CondTy = RetTy->getWithNewBitWidth(1);
2099 Cost +=
2100 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2102 Cost +=
2103 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2105 }
2106 return Cost;
2107 }
2108 case Intrinsic::experimental_cttz_elts: {
2109 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
2110
2111 // If we're not expanding the intrinsic then we assume this is cheap
2112 // to implement.
2113 if (!getTLI()->shouldExpandCttzElements(ArgType))
2114 return getTypeLegalizationCost(RetTy).first;
2115
2116 // TODO: The costs below reflect the expansion code in
2117 // SelectionDAGBuilder, but we may want to sacrifice some accuracy in
2118 // favour of compile time.
2119
2120 // Find the smallest "sensible" element type to use for the expansion.
2121 bool ZeroIsPoison = !cast<ConstantInt>(Args[1])->isZero();
2122 ConstantRange VScaleRange(APInt(64, 1), APInt::getZero(64));
2123 if (isa<ScalableVectorType>(ICA.getArgTypes()[0]) && I && I->getCaller())
2124 VScaleRange = getVScaleRange(I->getCaller(), 64);
2125
2126 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(
2127 RetTy, ArgType.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
2128 Type *NewEltTy = IntegerType::getIntNTy(RetTy->getContext(), EltWidth);
2129
2130 // Create the new vector type & get the vector length
2131 Type *NewVecTy = VectorType::get(
2132 NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
2133
2134 IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
2135 FMF);
2137 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
2138
2139 Cost +=
2140 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);
2141 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,
2142 Args[0]->getType(),
2144 Cost +=
2145 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);
2146
2147 IntrinsicCostAttributes ReducAttrs(Intrinsic::vector_reduce_umax,
2148 NewEltTy, NewVecTy, FMF, I, 1);
2149 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);
2150 Cost +=
2151 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);
2152
2153 return Cost;
2154 }
2155 case Intrinsic::get_active_lane_mask:
2156 case Intrinsic::experimental_vector_match:
2157 case Intrinsic::experimental_vector_histogram_add:
2158 case Intrinsic::experimental_vector_histogram_uadd_sat:
2159 case Intrinsic::experimental_vector_histogram_umax:
2160 case Intrinsic::experimental_vector_histogram_umin:
2161 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
2162 case Intrinsic::modf:
2163 case Intrinsic::sincos:
2164 case Intrinsic::sincospi: {
2165 std::optional<unsigned> CallRetElementIndex;
2166 // The first element of the modf result is returned by value in the
2167 // libcall.
2168 if (ICA.getID() == Intrinsic::modf)
2169 CallRetElementIndex = 0;
2170
2171 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(
2172 ICA, CostKind, CallRetElementIndex))
2173 return *Cost;
2174 // Otherwise, fallback to default scalarization cost.
2175 break;
2176 }
2177 }
2178
 2179     // Assume that we need to scalarize this intrinsic.
2180 // Compute the scalarization overhead based on Args for a vector
2181 // intrinsic.
2182 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2183 if (RetVF.isVector() && !RetVF.isScalable()) {
2184 ScalarizationCost = 0;
2185 if (!RetTy->isVoidTy()) {
2186 for (Type *VectorTy : getContainedTypes(RetTy)) {
2187 ScalarizationCost += getScalarizationOverhead(
2188 cast<VectorType>(VectorTy),
2189 /*Insert=*/true, /*Extract=*/false, CostKind);
2190 }
2191 }
2192 ScalarizationCost += getOperandsScalarizationOverhead(
2193 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),
2194 CostKind);
2195 }
2196
2197 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
2198 ScalarizationCost);
2199 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2200 }
2201
2202 /// Get intrinsic cost based on argument types.
2203 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
2204 /// cost of scalarizing the arguments and the return value will be computed
2205 /// based on types.
2209 Intrinsic::ID IID = ICA.getID();
2210 Type *RetTy = ICA.getReturnType();
2211 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
2212 FastMathFlags FMF = ICA.getFlags();
2213 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
2214 bool SkipScalarizationCost = ICA.skipScalarizationCost();
2215
2216 VectorType *VecOpTy = nullptr;
2217 if (!Tys.empty()) {
2218 // The vector reduction operand is operand 0 except for fadd/fmul.
2219 // Their operand 0 is a scalar start value, so the vector op is operand 1.
2220 unsigned VecTyIndex = 0;
2221 if (IID == Intrinsic::vector_reduce_fadd ||
2222 IID == Intrinsic::vector_reduce_fmul)
2223 VecTyIndex = 1;
2224 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
2225 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
2226 }
2227
2228 // Library call cost - other than size, make it expensive.
2229 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
2230 unsigned ISD = 0;
2231 switch (IID) {
2232 default: {
2233 // Scalable vectors cannot be scalarized, so return Invalid.
2234 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2235 return isa<ScalableVectorType>(Ty);
2236 }))
2238
2239 // Assume that we need to scalarize this intrinsic.
2240 InstructionCost ScalarizationCost =
2241 SkipScalarizationCost ? ScalarizationCostPassed : 0;
2242 unsigned ScalarCalls = 1;
2243 Type *ScalarRetTy = RetTy;
2244 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2245 if (!SkipScalarizationCost)
2246 ScalarizationCost = getScalarizationOverhead(
2247 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
2248 ScalarCalls = std::max(ScalarCalls,
2250 ScalarRetTy = RetTy->getScalarType();
2251 }
2252 SmallVector<Type *, 4> ScalarTys;
2253 for (Type *Ty : Tys) {
2254 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2255 if (!SkipScalarizationCost)
2256 ScalarizationCost += getScalarizationOverhead(
2257 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2258 ScalarCalls = std::max(ScalarCalls,
2260 Ty = Ty->getScalarType();
2261 }
2262 ScalarTys.push_back(Ty);
2263 }
2264 if (ScalarCalls == 1)
2265 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
2266
2267 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
2268 InstructionCost ScalarCost =
2269 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
2270
2271 return ScalarCalls * ScalarCost + ScalarizationCost;
2272 }
2273 // Look for intrinsics that can be lowered directly or turned into a scalar
2274 // intrinsic call.
2275 case Intrinsic::sqrt:
2276 ISD = ISD::FSQRT;
2277 break;
2278 case Intrinsic::sin:
2279 ISD = ISD::FSIN;
2280 break;
2281 case Intrinsic::cos:
2282 ISD = ISD::FCOS;
2283 break;
2284 case Intrinsic::sincos:
2285 ISD = ISD::FSINCOS;
2286 break;
2287 case Intrinsic::sincospi:
2288 ISD = ISD::FSINCOSPI;
2289 break;
2290 case Intrinsic::modf:
2291 ISD = ISD::FMODF;
2292 break;
2293 case Intrinsic::tan:
2294 ISD = ISD::FTAN;
2295 break;
2296 case Intrinsic::asin:
2297 ISD = ISD::FASIN;
2298 break;
2299 case Intrinsic::acos:
2300 ISD = ISD::FACOS;
2301 break;
2302 case Intrinsic::atan:
2303 ISD = ISD::FATAN;
2304 break;
2305 case Intrinsic::atan2:
2306 ISD = ISD::FATAN2;
2307 break;
2308 case Intrinsic::sinh:
2309 ISD = ISD::FSINH;
2310 break;
2311 case Intrinsic::cosh:
2312 ISD = ISD::FCOSH;
2313 break;
2314 case Intrinsic::tanh:
2315 ISD = ISD::FTANH;
2316 break;
2317 case Intrinsic::exp:
2318 ISD = ISD::FEXP;
2319 break;
2320 case Intrinsic::exp2:
2321 ISD = ISD::FEXP2;
2322 break;
2323 case Intrinsic::exp10:
2324 ISD = ISD::FEXP10;
2325 break;
2326 case Intrinsic::log:
2327 ISD = ISD::FLOG;
2328 break;
2329 case Intrinsic::log10:
2330 ISD = ISD::FLOG10;
2331 break;
2332 case Intrinsic::log2:
2333 ISD = ISD::FLOG2;
2334 break;
2335 case Intrinsic::ldexp:
2336 ISD = ISD::FLDEXP;
2337 break;
2338 case Intrinsic::fabs:
2339 ISD = ISD::FABS;
2340 break;
2341 case Intrinsic::canonicalize:
2343 break;
2344 case Intrinsic::minnum:
2345 ISD = ISD::FMINNUM;
2346 break;
2347 case Intrinsic::maxnum:
2348 ISD = ISD::FMAXNUM;
2349 break;
2350 case Intrinsic::minimum:
2351 ISD = ISD::FMINIMUM;
2352 break;
2353 case Intrinsic::maximum:
2354 ISD = ISD::FMAXIMUM;
2355 break;
2356 case Intrinsic::minimumnum:
2357 ISD = ISD::FMINIMUMNUM;
2358 break;
2359 case Intrinsic::maximumnum:
2360 ISD = ISD::FMAXIMUMNUM;
2361 break;
2362 case Intrinsic::copysign:
2364 break;
2365 case Intrinsic::floor:
2366 ISD = ISD::FFLOOR;
2367 break;
2368 case Intrinsic::ceil:
2369 ISD = ISD::FCEIL;
2370 break;
2371 case Intrinsic::trunc:
2372 ISD = ISD::FTRUNC;
2373 break;
2374 case Intrinsic::nearbyint:
2375 ISD = ISD::FNEARBYINT;
2376 break;
2377 case Intrinsic::rint:
2378 ISD = ISD::FRINT;
2379 break;
2380 case Intrinsic::lrint:
2381 ISD = ISD::LRINT;
2382 break;
2383 case Intrinsic::llrint:
2384 ISD = ISD::LLRINT;
2385 break;
2386 case Intrinsic::round:
2387 ISD = ISD::FROUND;
2388 break;
2389 case Intrinsic::roundeven:
2390 ISD = ISD::FROUNDEVEN;
2391 break;
2392 case Intrinsic::lround:
2393 ISD = ISD::LROUND;
2394 break;
2395 case Intrinsic::llround:
2396 ISD = ISD::LLROUND;
2397 break;
2398 case Intrinsic::pow:
2399 ISD = ISD::FPOW;
2400 break;
2401 case Intrinsic::fma:
2402 ISD = ISD::FMA;
2403 break;
2404 case Intrinsic::fmuladd:
2405 ISD = ISD::FMA;
2406 break;
2407 case Intrinsic::experimental_constrained_fmuladd:
2409 break;
2410 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
2411 case Intrinsic::lifetime_start:
2412 case Intrinsic::lifetime_end:
2413 case Intrinsic::sideeffect:
2414 case Intrinsic::pseudoprobe:
2415 case Intrinsic::arithmetic_fence:
2416 return 0;
2417 case Intrinsic::masked_store: {
2418 Type *Ty = Tys[0];
2419 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2420 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
2421 CostKind);
2422 }
2423 case Intrinsic::masked_load: {
2424 Type *Ty = RetTy;
2425 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
2426 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
2427 CostKind);
2428 }
2429 case Intrinsic::experimental_vp_strided_store: {
2430 auto *Ty = cast<VectorType>(ICA.getArgTypes()[0]);
2431 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2432 return thisT()->getStridedMemoryOpCost(
2433 Instruction::Store, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true,
2434 Alignment, CostKind, ICA.getInst());
2435 }
2436 case Intrinsic::experimental_vp_strided_load: {
2437 auto *Ty = cast<VectorType>(ICA.getReturnType());
2438 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());
2439 return thisT()->getStridedMemoryOpCost(
2440 Instruction::Load, Ty, /*Ptr=*/nullptr, /*VariableMask=*/true,
2441 Alignment, CostKind, ICA.getInst());
2442 }
2443 case Intrinsic::vector_reduce_add:
2444 case Intrinsic::vector_reduce_mul:
2445 case Intrinsic::vector_reduce_and:
2446 case Intrinsic::vector_reduce_or:
2447 case Intrinsic::vector_reduce_xor:
2448 return thisT()->getArithmeticReductionCost(
2449 getArithmeticReductionInstruction(IID), VecOpTy, std::nullopt,
2450 CostKind);
2451 case Intrinsic::vector_reduce_fadd:
2452 case Intrinsic::vector_reduce_fmul:
2453 return thisT()->getArithmeticReductionCost(
2454 getArithmeticReductionInstruction(IID), VecOpTy, FMF, CostKind);
2455 case Intrinsic::vector_reduce_smax:
2456 case Intrinsic::vector_reduce_smin:
2457 case Intrinsic::vector_reduce_umax:
2458 case Intrinsic::vector_reduce_umin:
2459 case Intrinsic::vector_reduce_fmax:
2460 case Intrinsic::vector_reduce_fmin:
2461 case Intrinsic::vector_reduce_fmaximum:
2462 case Intrinsic::vector_reduce_fminimum:
2463 return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
2464 VecOpTy, ICA.getFlags(), CostKind);
2465 case Intrinsic::experimental_vector_match: {
2466 auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
2467 auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
2468 unsigned SearchSize = NeedleTy->getNumElements();
2469
2470 // If we're not expanding the intrinsic then we assume this is cheap to
2471 // implement.
2472 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
2473 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
2474 return getTypeLegalizationCost(RetTy).first;
2475
2476 // Approximate the cost based on the expansion code in
2477 // SelectionDAGBuilder.
2479 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
2480 CostKind, 1, nullptr, nullptr);
2481 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
2482 CostKind, 0, nullptr, nullptr);
2483 Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, SearchTy, {},
2484 CostKind, 0, nullptr);
2485 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
2487 Cost +=
2488 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2489 Cost *= SearchSize;
2490 Cost +=
2491 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
2492 return Cost;
2493 }
2494 case Intrinsic::vector_reverse:
2495 return thisT()->getShuffleCost(TTI::SK_Reverse, cast<VectorType>(RetTy),
2496 cast<VectorType>(ICA.getArgTypes()[0]), {},
2497 CostKind, 0, cast<VectorType>(RetTy));
2498 case Intrinsic::experimental_vector_histogram_add:
2499 case Intrinsic::experimental_vector_histogram_uadd_sat:
2500 case Intrinsic::experimental_vector_histogram_umax:
2501 case Intrinsic::experimental_vector_histogram_umin: {
2503 Type *EltTy = ICA.getArgTypes()[1];
2504
2505 // Targets with scalable vectors must handle this on their own.
2506 if (!PtrsTy)
2508
2509 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);
2511 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,
2512 CostKind, 1, nullptr, nullptr);
2513 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,
2514 CostKind);
2515 switch (IID) {
2516 default:
2517 llvm_unreachable("Unhandled histogram update operation.");
2518 case Intrinsic::experimental_vector_histogram_add:
2519 Cost +=
2520 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);
2521 break;
2522 case Intrinsic::experimental_vector_histogram_uadd_sat: {
2523 IntrinsicCostAttributes UAddSat(Intrinsic::uadd_sat, EltTy, {EltTy});
2524 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);
2525 break;
2526 }
2527 case Intrinsic::experimental_vector_histogram_umax: {
2528 IntrinsicCostAttributes UMax(Intrinsic::umax, EltTy, {EltTy});
2529 Cost += thisT()->getIntrinsicInstrCost(UMax, CostKind);
2530 break;
2531 }
2532 case Intrinsic::experimental_vector_histogram_umin: {
2533 IntrinsicCostAttributes UMin(Intrinsic::umin, EltTy, {EltTy});
2534 Cost += thisT()->getIntrinsicInstrCost(UMin, CostKind);
2535 break;
2536 }
2537 }
2538 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,
2539 CostKind);
2540 Cost *= PtrsTy->getNumElements();
2541 return Cost;
2542 }
2543 case Intrinsic::get_active_lane_mask: {
2544 Type *ArgTy = ICA.getArgTypes()[0];
2545 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
2546 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);
2547
2548 // If we're not expanding the intrinsic then we assume this is cheap
2549 // to implement.
2550 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))
2551 return getTypeLegalizationCost(RetTy).first;
2552
2553 // Create the expanded types that will be used to calculate the uadd_sat
2554 // operation.
2555 Type *ExpRetTy =
2556 VectorType::get(ArgTy, cast<VectorType>(RetTy)->getElementCount());
2557 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
2559 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
2560 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
2562 return Cost;
2563 }
2564 case Intrinsic::experimental_memset_pattern:
2565 // This cost is set to match the cost of the memset_pattern16 libcall.
2566 // It should likely be re-evaluated after migration to this intrinsic
2567 // is complete.
2568 return TTI::TCC_Basic * 4;
2569 case Intrinsic::abs:
2570 ISD = ISD::ABS;
2571 break;
2572 case Intrinsic::fshl:
2573 ISD = ISD::FSHL;
2574 break;
2575 case Intrinsic::fshr:
2576 ISD = ISD::FSHR;
2577 break;
2578 case Intrinsic::smax:
2579 ISD = ISD::SMAX;
2580 break;
2581 case Intrinsic::smin:
2582 ISD = ISD::SMIN;
2583 break;
2584 case Intrinsic::umax:
2585 ISD = ISD::UMAX;
2586 break;
2587 case Intrinsic::umin:
2588 ISD = ISD::UMIN;
2589 break;
2590 case Intrinsic::sadd_sat:
2591 ISD = ISD::SADDSAT;
2592 break;
2593 case Intrinsic::ssub_sat:
2594 ISD = ISD::SSUBSAT;
2595 break;
2596 case Intrinsic::uadd_sat:
2597 ISD = ISD::UADDSAT;
2598 break;
2599 case Intrinsic::usub_sat:
2600 ISD = ISD::USUBSAT;
2601 break;
2602 case Intrinsic::smul_fix:
2603 ISD = ISD::SMULFIX;
2604 break;
2605 case Intrinsic::umul_fix:
2606 ISD = ISD::UMULFIX;
2607 break;
2608 case Intrinsic::sadd_with_overflow:
2609 ISD = ISD::SADDO;
2610 break;
2611 case Intrinsic::ssub_with_overflow:
2612 ISD = ISD::SSUBO;
2613 break;
2614 case Intrinsic::uadd_with_overflow:
2615 ISD = ISD::UADDO;
2616 break;
2617 case Intrinsic::usub_with_overflow:
2618 ISD = ISD::USUBO;
2619 break;
2620 case Intrinsic::smul_with_overflow:
2621 ISD = ISD::SMULO;
2622 break;
2623 case Intrinsic::umul_with_overflow:
2624 ISD = ISD::UMULO;
2625 break;
2626 case Intrinsic::fptosi_sat:
2627 case Intrinsic::fptoui_sat: {
2628 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Tys[0]);
2629 std::pair<InstructionCost, MVT> RetLT = getTypeLegalizationCost(RetTy);
2630
2631 // For cast instructions, types are different between source and
 2632       // destination. Also need to check if the source type can be legalized.
2633 if (!SrcLT.first.isValid() || !RetLT.first.isValid())
2635 ISD = IID == Intrinsic::fptosi_sat ? ISD::FP_TO_SINT_SAT
2637 break;
2638 }
2639 case Intrinsic::ctpop:
2640 ISD = ISD::CTPOP;
2641 // In case of legalization use TCC_Expensive. This is cheaper than a
2642 // library call but still not a cheap instruction.
2643 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2644 break;
2645 case Intrinsic::ctlz:
2646 ISD = ISD::CTLZ;
2647 break;
2648 case Intrinsic::cttz:
2649 ISD = ISD::CTTZ;
2650 break;
2651 case Intrinsic::bswap:
2652 ISD = ISD::BSWAP;
2653 break;
2654 case Intrinsic::bitreverse:
2656 break;
2657 case Intrinsic::ucmp:
2658 ISD = ISD::UCMP;
2659 break;
2660 case Intrinsic::scmp:
2661 ISD = ISD::SCMP;
2662 break;
2663 }
2664
2665 auto *ST = dyn_cast<StructType>(RetTy);
2666 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;
2667 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(LegalizeTy);
2668
2669 const TargetLoweringBase *TLI = getTLI();
2670
2671 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2672 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2673 TLI->isFAbsFree(LT.second)) {
2674 return 0;
2675 }
2676
2677 // The operation is legal. Assume it costs 1.
 2678       // If the type is split into multiple registers, assume that there is
 2679       // some overhead to this.
2680 // TODO: Once we have extract/insert subvector cost we need to use them.
2681 if (LT.first > 1)
2682 return (LT.first * 2);
2683 else
2684 return (LT.first * 1);
2685 } else if (TLI->isOperationCustom(ISD, LT.second)) {
2686 // If the operation is custom lowered then assume
2687 // that the code is twice as expensive.
2688 return (LT.first * 2);
2689 }
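// For illustration: a legal operation whose type fits one register is
// assumed to cost 1, one whose legal type is split across e.g. four
// registers costs 4 * 2 = 8, and a custom-lowered operation is likewise
// charged twice the number of legalized parts.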
2690
2691 switch (IID) {
2692 case Intrinsic::fmuladd: {
2693 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2694 // point mul followed by an add.
2695
2696 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2697 CostKind) +
2698 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2699 CostKind);
2700 }
2701 case Intrinsic::experimental_constrained_fmuladd: {
2702 IntrinsicCostAttributes FMulAttrs(
2703 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2704 IntrinsicCostAttributes FAddAttrs(
2705 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2706 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2707 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2708 }
2709 case Intrinsic::smin:
2710 case Intrinsic::smax:
2711 case Intrinsic::umin:
2712 case Intrinsic::umax: {
2713 // minmax(X,Y) = select(icmp(X,Y),X,Y)
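// This path is only reached when the corresponding ISD node is neither
// legal nor custom for the legalized type; e.g. a umax of two <4 x i32>
// operands is then costed as an icmp ugt producing a <4 x i1> mask plus a
// select on that mask.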
2714 Type *CondTy = RetTy->getWithNewBitWidth(1);
2715 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2716 CmpInst::Predicate Pred =
2717 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2719 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2720 Pred, CostKind);
2721 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2722 Pred, CostKind);
2723 return Cost;
2724 }
2725 case Intrinsic::sadd_with_overflow:
2726 case Intrinsic::ssub_with_overflow: {
2727 Type *SumTy = RetTy->getContainedType(0);
2728 Type *OverflowTy = RetTy->getContainedType(1);
2729 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2730 ? BinaryOperator::Add
2731 : BinaryOperator::Sub;
2732
2733 // Add:
2734 // Overflow -> (Result < LHS) ^ (RHS < 0)
2735 // Sub:
2736 // Overflow -> (Result < LHS) ^ (RHS > 0)
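// For instance, sadd.with.overflow is costed as the add itself, two signed
// compares (Result vs. LHS and RHS vs. zero) and an xor of the two i1
// results.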
2738 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2739 Cost +=
2740 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,
2742 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2743 CostKind);
2744 return Cost;
2745 }
2746 case Intrinsic::uadd_with_overflow:
2747 case Intrinsic::usub_with_overflow: {
2748 Type *SumTy = RetTy->getContainedType(0);
2749 Type *OverflowTy = RetTy->getContainedType(1);
2750 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2751 ? BinaryOperator::Add
2752 : BinaryOperator::Sub;
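// A single unsigned compare of the result against the LHS suffices here:
// uadd wraps iff the sum ends up below either operand, and usub wraps iff
// the result ends up above the minuend.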
2753 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2756
2758 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2759 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
2760 OverflowTy, Pred, CostKind);
2761 return Cost;
2762 }
2763 case Intrinsic::smul_with_overflow:
2764 case Intrinsic::umul_with_overflow: {
2765 Type *MulTy = RetTy->getContainedType(0);
2766 Type *OverflowTy = RetTy->getContainedType(1);
2767 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2768 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2769 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2770
2771 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
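// The expansion being costed widens both operands to twice the bit width,
// multiplies, truncates the low half back and inspects the high half; e.g.
// a 32-bit smul.with.overflow is charged two sexts, an i64 mul, two truncs,
// a lshr, an ashr (signed case only) and an icmp ne.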
2773
2775 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2776 Cost +=
2777 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2778 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2779 CCH, CostKind);
2780 Cost += thisT()->getArithmeticInstrCost(
2781 Instruction::LShr, ExtTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2783
2784 if (IsSigned)
2785 Cost += thisT()->getArithmeticInstrCost(
2786 Instruction::AShr, MulTy, CostKind,
2789
2790 Cost += thisT()->getCmpSelInstrCost(
2791 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2792 return Cost;
2793 }
2794 case Intrinsic::sadd_sat:
2795 case Intrinsic::ssub_sat: {
2796 // Assume a default expansion.
2797 Type *CondTy = RetTy->getWithNewBitWidth(1);
2798
2799 Type *OpTy = StructType::create({RetTy, CondTy});
2800 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2801 ? Intrinsic::sadd_with_overflow
2802 : Intrinsic::ssub_with_overflow;
2804
2805 // SatMax -> Overflow && SumDiff < 0
2806 // SatMin -> Overflow && SumDiff >= 0
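// i.e. on top of the sadd/ssub.with.overflow call this pays for one icmp on
// the wrapped sum's sign and two selects: one picking SIGNED_MAX vs.
// SIGNED_MIN and one picking between that saturation value and the sum.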
2808 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2809 nullptr, ScalarizationCostPassed);
2810 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2811 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2812 Pred, CostKind);
2813 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2814 CondTy, Pred, CostKind);
2815 return Cost;
2816 }
2817 case Intrinsic::uadd_sat:
2818 case Intrinsic::usub_sat: {
2819 Type *CondTy = RetTy->getWithNewBitWidth(1);
2820
2821 Type *OpTy = StructType::create({RetTy, CondTy});
2822 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2823 ? Intrinsic::uadd_with_overflow
2824 : Intrinsic::usub_with_overflow;
2825
2827 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2828 nullptr, ScalarizationCostPassed);
2829 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2830 Cost +=
2831 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2833 return Cost;
2834 }
2835 case Intrinsic::smul_fix:
2836 case Intrinsic::umul_fix: {
2837 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2838 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2839
2840 unsigned ExtOp =
2841 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2843
2845 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2846 Cost +=
2847 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2848 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2849 CCH, CostKind);
2850 Cost += thisT()->getArithmeticInstrCost(
2851 Instruction::LShr, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2853 Cost += thisT()->getArithmeticInstrCost(
2854 Instruction::Shl, RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2856 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2857 return Cost;
2858 }
2859 case Intrinsic::abs: {
2860 // abs(X) = select(icmp(X,0),X,sub(0,X))
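// e.g. for a <4 x i32> abs this amounts to one vector compare against zero,
// one vector select and one vector subtract-from-zero; targets with a
// native abs will normally have returned earlier via the ISD::ABS legality
// check above.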
2861 Type *CondTy = RetTy->getWithNewBitWidth(1);
2864 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2865 Pred, CostKind);
2866 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2867 Pred, CostKind);
2868 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2869 Cost += thisT()->getArithmeticInstrCost(
2870 BinaryOperator::Sub, RetTy, CostKind,
2872 return Cost;
2873 }
2874 case Intrinsic::fshl:
2875 case Intrinsic::fshr: {
2876 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2877 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2878 Type *CondTy = RetTy->getWithNewBitWidth(1);
2880 Cost +=
2881 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
2882 Cost +=
2883 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
2884 Cost +=
2885 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);
2886 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
2887 CostKind);
 2888       // Non-constant shift amounts require a modulo. If the type size is a
 2889       // power of 2 then this will be converted to an 'and', otherwise it will
 2890       // use a 'urem'.
2891 Cost += thisT()->getArithmeticInstrCost(
2892 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? BinaryOperator::And
2893 : BinaryOperator::URem,
2894 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},
2895 {TTI::OK_UniformConstantValue, TTI::OP_None});
2896 // Shift-by-zero handling.
2897 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2899 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2901 return Cost;
2902 }
2903 case Intrinsic::fptosi_sat:
2904 case Intrinsic::fptoui_sat: {
2905 if (Tys.empty())
2906 break;
2907 Type *FromTy = Tys[0];
2908 bool IsSigned = IID == Intrinsic::fptosi_sat;
2909
2911 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2912 {FromTy, FromTy});
2913 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2914 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2915 {FromTy, FromTy});
2916 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2917 Cost += thisT()->getCastInstrCost(
2918 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2920 if (IsSigned) {
2921 Type *CondTy = RetTy->getWithNewBitWidth(1);
2922 Cost += thisT()->getCmpSelInstrCost(
2923 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2924 Cost += thisT()->getCmpSelInstrCost(
2925 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2926 }
2927 return Cost;
2928 }
2929 case Intrinsic::ucmp:
2930 case Intrinsic::scmp: {
2931 Type *CmpTy = Tys[0];
2932 Type *CondTy = RetTy->getWithNewBitWidth(1);
2934 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2936 CostKind) +
2937 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,
2939 CostKind);
2940
2941 EVT VT = TLI->getValueType(DL, CmpTy, true);
2943 // x < y ? -1 : (x > y ? 1 : 0)
2944 Cost += 2 * thisT()->getCmpSelInstrCost(
2945 BinaryOperator::Select, RetTy, CondTy,
2947 } else {
2948 // zext(x > y) - zext(x < y)
2949 Cost +=
2950 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,
2952 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,
2953 CostKind);
2954 }
2955 return Cost;
2956 }
2957 case Intrinsic::maximumnum:
2958 case Intrinsic::minimumnum: {
 2959       // On platforms that support FMAXNUM_IEEE/FMINNUM_IEEE, we expand
2960 // maximumnum/minimumnum to
2961 // ARG0 = fcanonicalize ARG0, ARG0 // to quiet ARG0
2962 // ARG1 = fcanonicalize ARG1, ARG1 // to quiet ARG1
2963 // RESULT = MAXNUM_IEEE ARG0, ARG1 // or MINNUM_IEEE
 2964       // FIXME: In LangRef, we claimed FMAXNUM has the same behaviour as
2965 // FMAXNUM_IEEE, while the backend hasn't migrated the code yet.
2966 // Finally, we will remove FMAXNUM_IEEE and FMINNUM_IEEE.
2967 int IeeeISD =
2968 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
2969 if (TLI->isOperationLegal(IeeeISD, LT.second)) {
2970 IntrinsicCostAttributes FCanonicalizeAttrs(Intrinsic::canonicalize,
2971 RetTy, Tys[0]);
2972 InstructionCost FCanonicalizeCost =
2973 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);
2974 return LT.first + FCanonicalizeCost * 2;
2975 }
2976 break;
2977 }
2978 default:
2979 break;
2980 }
2981
2982 // Else, assume that we need to scalarize this intrinsic. For math builtins
2983 // this will emit a costly libcall, adding call overhead and spills. Make it
2984 // very expensive.
2985 if (isVectorizedTy(RetTy)) {
2986 ArrayRef<Type *> RetVTys = getContainedTypes(RetTy);
2987
2988 // Scalable vectors cannot be scalarized, so return Invalid.
2989 if (any_of(concat<Type *const>(RetVTys, Tys),
2990 [](Type *Ty) { return isa<ScalableVectorType>(Ty); }))
2992
2993 InstructionCost ScalarizationCost = ScalarizationCostPassed;
2994 if (!SkipScalarizationCost) {
2995 ScalarizationCost = 0;
2996 for (Type *RetVTy : RetVTys) {
2997 ScalarizationCost += getScalarizationOverhead(
2998 cast<VectorType>(RetVTy), /*Insert=*/true,
2999 /*Extract=*/false, CostKind);
3000 }
3001 }
3002
3003 unsigned ScalarCalls = getVectorizedTypeVF(RetTy).getFixedValue();
3004 SmallVector<Type *, 4> ScalarTys;
3005 for (Type *Ty : Tys) {
3006 if (Ty->isVectorTy())
3007 Ty = Ty->getScalarType();
3008 ScalarTys.push_back(Ty);
3009 }
3010 IntrinsicCostAttributes Attrs(IID, toScalarizedTy(RetTy), ScalarTys, FMF);
3011 InstructionCost ScalarCost =
3012 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
3013 for (Type *Ty : Tys) {
3014 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
3015 if (!ICA.skipScalarizationCost())
3016 ScalarizationCost += getScalarizationOverhead(
3017 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
3018 ScalarCalls = std::max(ScalarCalls,
3020 }
3021 }
3022 return ScalarCalls * ScalarCost + ScalarizationCost;
3023 }
3024
3025 // This is going to be turned into a library call, make it expensive.
3026 return SingleCallCost;
3027 }
3028
3029 /// Compute a cost of the given call instruction.
3030 ///
3031 /// Compute the cost of calling function F with return type RetTy and
3032 /// argument types Tys. F might be nullptr, in this case the cost of an
3033 /// arbitrary call with the specified signature will be returned.
3034 /// This is used, for instance, when we estimate call of a vector
3035 /// counterpart of the given function.
3036 /// \param F Called function, might be nullptr.
3037 /// \param RetTy Return value types.
3038 /// \param Tys Argument types.
3039 /// \returns The cost of Call instruction.
3042 TTI::TargetCostKind CostKind) const override {
3043 return 10;
3044 }
3045
3046 unsigned getNumberOfParts(Type *Tp) const override {
3047 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
3048 if (!LT.first.isValid())
3049 return 0;
3050 // Try to find actual number of parts for non-power-of-2 elements as
3051 // ceil(num-of-elements/num-of-subtype-elements).
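// For example, if the legal subtype for a <9 x i32> value is v4i32, only
// ceil(9 / 4) = 3 parts are needed to cover the nine elements, so 3 is
// returned even though legalization itself may report a power-of-two number
// of registers.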
3052 if (auto *FTp = dyn_cast<FixedVectorType>(Tp);
3053 Tp && LT.second.isFixedLengthVector() &&
3054 !has_single_bit(FTp->getNumElements())) {
3055 if (auto *SubTp = dyn_cast_if_present<FixedVectorType>(
3056 EVT(LT.second).getTypeForEVT(Tp->getContext()));
3057 SubTp && SubTp->getElementType() == FTp->getElementType())
3058 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());
3059 }
3060 return LT.first.getValue();
3061 }
3062
3065 TTI::TargetCostKind) const override {
3066 return 0;
3067 }
3068
3069 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
 3070 /// We're assuming that the reduction operation is performed in the following way:
3071 ///
3072 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
3073 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
3074 /// \----------------v-------------/ \----------v------------/
3075 /// n/2 elements n/2 elements
3076 /// %red1 = op <n x t> %val, <n x t> val1
3077 /// After this operation we have a vector %red1 where only the first n/2
3078 /// elements are meaningful, the second n/2 elements are undefined and can be
3079 /// dropped. All other operations are actually working with the vector of
3080 /// length n/2, not n, though the real vector length is still n.
3081 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
3082 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
3083 /// \----------------v-------------/ \----------v------------/
3084 /// n/4 elements 3*n/4 elements
3085 /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
3086 /// length n/2, the resulting vector has length n/4 etc.
3087 ///
3088 /// The cost model should take into account that the actual length of the
3089 /// vector is reduced on each iteration.
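///
/// For example, an add reduction of <8 x i32> on a target whose widest legal
/// integer vector is v4i32 is costed as one extract-subvector shuffle plus one
/// v4i32 add to reach the legal width, two further shuffle + add levels on
/// v4i32, and a final extractelement of lane 0.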
3092 // Targets must implement a default value for the scalable case, since
3093 // we don't know how many lanes the vector has.
3096
3097 Type *ScalarTy = Ty->getElementType();
3098 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3099 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
3100 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
3101 NumVecElts >= 2) {
3102 // Or reduction for i1 is represented as:
3103 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3104 // %res = cmp ne iReduxWidth %val, 0
3105 // And reduction for i1 is represented as:
3106 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
 3107       // %res = cmp eq iReduxWidth %val, -1 (all ones)
3108 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
3109 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
3111 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
3114 }
3115 unsigned NumReduxLevels = Log2_32(NumVecElts);
3116 InstructionCost ArithCost = 0;
3117 InstructionCost ShuffleCost = 0;
3118 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3119 unsigned LongVectorCount = 0;
3120 unsigned MVTLen =
3121 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3122 while (NumVecElts > MVTLen) {
3123 NumVecElts /= 2;
3124 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3125 ShuffleCost += thisT()->getShuffleCost(
3126 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3127 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
3128 Ty = SubTy;
3129 ++LongVectorCount;
3130 }
3131
3132 NumReduxLevels -= LongVectorCount;
3133
3134 // The minimal length of the vector is limited by the real length of vector
3135 // operations performed on the current platform. That's why several final
3136 // reduction operations are performed on the vectors with the same
3137 // architecture-dependent length.
3138
3139 // By default reductions need one shuffle per reduction level.
3140 ShuffleCost +=
3141 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3142 Ty, {}, CostKind, 0, Ty);
3143 ArithCost +=
3144 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
3145 return ShuffleCost + ArithCost +
3146 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3147 CostKind, 0, nullptr, nullptr);
3148 }
3149
3150 /// Try to calculate the cost of performing strict (in-order) reductions,
3151 /// which involves doing a sequence of floating point additions in lane
3152 /// order, starting with an initial value. For example, consider a scalar
3153 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
3154 ///
3155 /// Vector = <float %v0, float %v1, float %v2, float %v3>
3156 ///
3157 /// %add1 = %InitVal + %v0
3158 /// %add2 = %add1 + %v1
3159 /// %add3 = %add2 + %v2
3160 /// %add4 = %add3 + %v3
3161 ///
3162 /// As a simple estimate we can say the cost of such a reduction is 4 times
3163 /// the cost of a scalar FP addition. We can only estimate the costs for
3164 /// fixed-width vectors here because for scalable vectors we do not know the
3165 /// runtime number of operations.
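///
/// The implementation below also charges for extracting each lane from the
/// vector, so the <4 x float> example above is costed as four lane extracts
/// plus four scalar fadds.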
3168 // Targets must implement a default value for the scalable case, since
3169 // we don't know how many lanes the vector has.
3172
3173 auto *VTy = cast<FixedVectorType>(Ty);
3175 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
3176 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
3177 Opcode, VTy->getElementType(), CostKind);
3178 ArithCost *= VTy->getNumElements();
3179
3180 return ExtractCost + ArithCost;
3181 }
3182
3185 std::optional<FastMathFlags> FMF,
3186 TTI::TargetCostKind CostKind) const override {
3187 assert(Ty && "Unknown reduction vector type");
3189 return getOrderedReductionCost(Opcode, Ty, CostKind);
3190 return getTreeReductionCost(Opcode, Ty, CostKind);
3191 }
3192
3193 /// Try to calculate op costs for min/max reduction operations.
3194 /// \param CondTy Conditional type for the Select instruction.
3197 TTI::TargetCostKind CostKind) const override {
3198 // Targets must implement a default value for the scalable case, since
3199 // we don't know how many lanes the vector has.
3202
3203 Type *ScalarTy = Ty->getElementType();
3204 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
3205 unsigned NumReduxLevels = Log2_32(NumVecElts);
3206 InstructionCost MinMaxCost = 0;
3207 InstructionCost ShuffleCost = 0;
3208 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
3209 unsigned LongVectorCount = 0;
3210 unsigned MVTLen =
3211 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
3212 while (NumVecElts > MVTLen) {
3213 NumVecElts /= 2;
3214 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
3215
3216 ShuffleCost += thisT()->getShuffleCost(
3217 TTI::SK_ExtractSubvector, SubTy, Ty, {}, CostKind, NumVecElts, SubTy);
3218
3219 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
3220 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
3221 Ty = SubTy;
3222 ++LongVectorCount;
3223 }
3224
3225 NumReduxLevels -= LongVectorCount;
3226
3227 // The minimal length of the vector is limited by the real length of vector
3228 // operations performed on the current platform. That's why several final
 3229     // reduction operations are performed on the vectors with the same
3230 // architecture-dependent length.
3231 ShuffleCost +=
3232 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
3233 Ty, {}, CostKind, 0, Ty);
3234 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
3235 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
3236 // The last min/max should be in vector registers and we counted it above.
3237 // So just need a single extractelement.
3238 return ShuffleCost + MinMaxCost +
3239 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
3240 CostKind, 0, nullptr, nullptr);
3241 }
3242
3244 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
3245 VectorType *Ty, std::optional<FastMathFlags> FMF,
3246 TTI::TargetCostKind CostKind) const override {
3247 if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
3248 FTy && IsUnsigned && Opcode == Instruction::Add &&
3249 FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
3250 // Represent vector_reduce_add(ZExt(<n x i1>)) as
3251 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
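// e.g. reducing zext(<16 x i1>) to i32 is costed as a bitcast of the mask
// to i16 plus a ctpop of that i16; the final width adjustment is not
// charged here.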
3252 auto *IntTy =
3253 IntegerType::get(ResTy->getContext(), FTy->getNumElements());
3254 IntrinsicCostAttributes ICA(Intrinsic::ctpop, IntTy, {IntTy},
3255 FMF ? *FMF : FastMathFlags());
3256 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,
3258 thisT()->getIntrinsicInstrCost(ICA, CostKind);
3259 }
3260 // Without any native support, this is equivalent to the cost of
3261 // vecreduce.opcode(ext(Ty A)).
3262 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3263 InstructionCost RedCost =
3264 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
3265 InstructionCost ExtCost = thisT()->getCastInstrCost(
3266 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3268
3269 return RedCost + ExtCost;
3270 }
3271
3273 getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy,
3274 VectorType *Ty,
3275 TTI::TargetCostKind CostKind) const override {
3276 // Without any native support, this is equivalent to the cost of
3277 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
3278 // vecreduce.add(mul(A, B)).
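// e.g. a dot-product style reduce.add(mul(zext(<8 x i8> A), zext(<8 x i8> B)))
// accumulated into i32 is costed as the <8 x i32> add reduction plus an
// <8 x i32> mul and two extends; targets with a native multiply-accumulate
// reduction are expected to override this.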
3279 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&
3280 "The reduction opcode is expected to be Add or Sub.");
3281 VectorType *ExtTy = VectorType::get(ResTy, Ty);
3282 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
3283 RedOpcode, ExtTy, std::nullopt, CostKind);
3284 InstructionCost ExtCost = thisT()->getCastInstrCost(
3285 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
3287
3288 InstructionCost MulCost =
3289 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
3290
3291 return RedCost + MulCost + 2 * ExtCost;
3292 }
3293
3295
3296 /// @}
3297};
3298
3299/// Concrete BasicTTIImpl that can be used if no further customization
3300/// is needed.
3301class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
3302 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
3303
3304 friend class BasicTTIImplBase<BasicTTIImpl>;
3305
3306 const TargetSubtargetInfo *ST;
3307 const TargetLoweringBase *TLI;
3308
3309 const TargetSubtargetInfo *getST() const { return ST; }
3310 const TargetLoweringBase *getTLI() const { return TLI; }
3311
3312public:
3313 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
3314};
3315
3316} // end namespace llvm
3317
3318#endif // LLVM_CODEGEN_BASICTTIIMPL_H
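The concrete BasicTTIImpl above also shows the shape a backend's TTI implementation takes. A minimal sketch of a hypothetical target following that shape is shown below; the MyTarget* class names and the accessors used in the constructor (getSubtargetImpl, getTargetLowering) are assumptions for illustration only, not declarations from this file.

// Editorial sketch only; "MyTarget*" types and their accessors are assumed.
class MyTargetTTIImpl final : public BasicTTIImplBase<MyTargetTTIImpl> {
  using BaseT = BasicTTIImplBase<MyTargetTTIImpl>;
  friend BaseT;

  const MyTargetSubtarget *ST;  // assumed TargetSubtargetInfo subclass
  const MyTargetLowering *TLI;  // assumed TargetLoweringBase subclass

  // Same accessor shape as BasicTTIImpl above.
  const MyTargetSubtarget *getST() const { return ST; }
  const MyTargetLowering *getTLI() const { return TLI; }

public:
  explicit MyTargetTTIImpl(const MyTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}
};

From there, individual cost hooks can be overridden in the derived class to refine the generic model implemented in this header.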