1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InstrTypes.h"
39#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Operator.h"
43#include "llvm/IR/Type.h"
44#include "llvm/IR/Value.h"
51#include <algorithm>
52#include <cassert>
53#include <cstdint>
54#include <limits>
55#include <optional>
56#include <utility>
57
58namespace llvm {
59
60class Function;
61class GlobalValue;
62class LLVMContext;
63class ScalarEvolution;
64class SCEV;
65class TargetMachine;
66
67extern cl::opt<unsigned> PartialUnrollingThreshold;
68
69/// Base class which can be used to help build a TTI implementation.
70///
71/// This class provides as much implementation of the TTI interface as is
72/// possible using the target independent parts of the code generator.
73///
74/// In order to subclass it, your class must implement a getST() method to
75/// return the subtarget, and a getTLI() method to return the target lowering.
76/// We need these methods implemented in the derived class so that this class
77/// doesn't have to duplicate storage for them.
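///
/// Illustrative sketch (not from this file): a hypothetical target with a
/// subtarget class MySubtarget and a lowering class MyTargetLowering would
/// wire the two hooks up roughly as follows:
/// \code
///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///     const MySubtarget *ST;
///     const MyTargetLowering *TLI;
///   public:
///     const MySubtarget *getST() const { return ST; }
///     const MyTargetLowering *getTLI() const { return TLI; }
///   };
/// \endcode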
78template <typename T>
79class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
80private:
81 using BaseT = TargetTransformInfoImplCRTPBase<T>;
82 using TTI = TargetTransformInfo;
83
84 /// Helper function to access this as a T.
85 T *thisT() { return static_cast<T *>(this); }
86
87 /// Estimate a cost of Broadcast as an extract and sequence of insert
88 /// operations.
89 InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy,
92 // Broadcast cost is equal to the cost of extracting the zero'th element
93 // plus the cost of inserting it into every element of the result vector.
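    // Worked example (illustrative): broadcasting a <4 x float> is modeled as
    // one ExtractElement (lane 0) plus four InsertElements, i.e. five
    // getVectorInstrCost queries in total.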
94 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
95 CostKind, 0, nullptr, nullptr);
96
97 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
98 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
99 CostKind, i, nullptr, nullptr);
100 }
101 return Cost;
102 }
103
104 /// Estimate a cost of shuffle as a sequence of extract and insert
105 /// operations.
106 InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy,
109 // Shuffle cost is equal to the cost of extracting each element from its
110 // argument plus the cost of inserting them into the result vector.
111
112 // e.g. <4 x float> has a mask of <0,5,2,7>, i.e. we need to extract from
113 // index 0 of the first vector, index 1 of the second vector, index 2 of the
114 // first vector and finally index 3 of the second vector, and insert them at
115 // indices <0,1,2,3> of the result vector.
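    // Worked example (illustrative): for that <4 x float> shuffle the loop
    // below adds four InsertElement and four ExtractElement costs, i.e.
    // 2 * NumElements element operations.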
116 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
117 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
118 CostKind, i, nullptr, nullptr);
119 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
120 CostKind, i, nullptr, nullptr);
121 }
122 return Cost;
123 }
124
125 /// Estimate a cost of subvector extraction as a sequence of extract and
126 /// insert operations.
127 InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
129 int Index,
130 FixedVectorType *SubVTy) {
131 assert(VTy && SubVTy &&
132 "Can only extract subvectors from vectors");
133 int NumSubElts = SubVTy->getNumElements();
134 assert((!isa<FixedVectorType>(VTy) ||
135 (Index + NumSubElts) <=
136 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
137 "SK_ExtractSubvector index out of range");
138
140 // Subvector extraction cost is equal to the cost of extracting elements
141 // from the source type plus the cost of inserting them into the result
142 // vector type.
143 for (int i = 0; i != NumSubElts; ++i) {
144 Cost +=
145 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
146 CostKind, i + Index, nullptr, nullptr);
147 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
148 CostKind, i, nullptr, nullptr);
149 }
150 return Cost;
151 }
152
153 /// Estimate a cost of subvector insertion as a sequence of extract and
154 /// insert operations.
155 InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
157 int Index,
158 FixedVectorType *SubVTy) {
159 assert(VTy && SubVTy &&
160 "Can only insert subvectors into vectors");
161 int NumSubElts = SubVTy->getNumElements();
162 assert((!isa<FixedVectorType>(VTy) ||
163 (Index + NumSubElts) <=
164 (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
165 "SK_InsertSubvector index out of range");
166
168 // Subvector insertion cost is equal to the cost of extracting elements
169 // from the source type plus the cost of inserting them into the result
170 // vector type.
171 for (int i = 0; i != NumSubElts; ++i) {
172 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
173 CostKind, i, nullptr, nullptr);
174 Cost +=
175 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
176 i + Index, nullptr, nullptr);
177 }
178 return Cost;
179 }
180
181 /// Local query method delegates up to T which *must* implement this!
182 const TargetSubtargetInfo *getST() const {
183 return static_cast<const T *>(this)->getST();
184 }
185
186 /// Local query method delegates up to T which *must* implement this!
187 const TargetLoweringBase *getTLI() const {
188 return static_cast<const T *>(this)->getTLI();
189 }
190
191 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
192 switch (M) {
193 case TTI::MIM_Unindexed:
194 return ISD::UNINDEXED;
195 case TTI::MIM_PreInc:
196 return ISD::PRE_INC;
197 case TTI::MIM_PreDec:
198 return ISD::PRE_DEC;
199 case TTI::MIM_PostInc:
200 return ISD::POST_INC;
201 case TTI::MIM_PostDec:
202 return ISD::POST_DEC;
203 }
204 llvm_unreachable("Unexpected MemIndexedMode");
205 }
206
207 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
208 Align Alignment,
209 bool VariableMask,
210 bool IsGatherScatter,
212 // We cannot scalarize scalable vectors, so return Invalid.
213 if (isa<ScalableVectorType>(DataTy))
215
216 auto *VT = cast<FixedVectorType>(DataTy);
217 // Assume the target does not have support for gather/scatter operations
218 // and provide a rough estimate.
219 //
220 // First, compute the cost of the individual memory operations.
221 InstructionCost AddrExtractCost =
222 IsGatherScatter
223 ? getVectorInstrCost(Instruction::ExtractElement,
225 PointerType::get(VT->getElementType(), 0),
226 VT->getNumElements()),
227 CostKind, -1, nullptr, nullptr)
228 : 0;
229 InstructionCost LoadCost =
230 VT->getNumElements() *
231 (AddrExtractCost +
232 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
233
234 // Next, compute the cost of packing the result in a vector.
235 InstructionCost PackingCost =
236 getScalarizationOverhead(VT, Opcode != Instruction::Store,
237 Opcode == Instruction::Store, CostKind);
238
239 InstructionCost ConditionalCost = 0;
240 if (VariableMask) {
241 // Compute the cost of conditionally executing the memory operations with
242 // variable masks. This includes extracting the individual conditions,
243 // branches and PHIs to combine the results.
244 // NOTE: Estimating the cost of conditionally executing the memory
245 // operations accurately is quite difficult and the current solution
246 // provides a very rough estimate only.
247 ConditionalCost =
248 VT->getNumElements() *
250 Instruction::ExtractElement,
252 VT->getNumElements()),
253 CostKind, -1, nullptr, nullptr) +
254 getCFInstrCost(Instruction::Br, CostKind) +
255 getCFInstrCost(Instruction::PHI, CostKind));
256 }
257
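    // Illustrative tally (not from the source): a masked load of <4 x i32>
    // with a variable mask is modeled as 4 scalar loads (plus per-lane
    // address extracts in the gather/scatter case), the cost of packing the
    // 4 results back into a vector, and 4 x (extract-condition + branch +
    // PHI) of conditional overhead.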
258 return LoadCost + PackingCost + ConditionalCost;
259 }
260
261protected:
262 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
263 : BaseT(DL) {}
264 virtual ~BasicTTIImplBase() = default;
265
267
268public:
269 /// \name Scalar TTI Implementations
270 /// @{
272 unsigned AddressSpace, Align Alignment,
273 unsigned *Fast) const {
275 return getTLI()->allowsMisalignedMemoryAccesses(
277 }
278
279 bool hasBranchDivergence(const Function *F = nullptr) { return false; }
280
281 bool isSourceOfDivergence(const Value *V) { return false; }
282
283 bool isAlwaysUniform(const Value *V) { return false; }
284
285 bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
286 return false;
287 }
288
289 bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const {
290 return true;
291 }
292
294 // Return an invalid address space.
295 return -1;
296 }
297
299 Intrinsic::ID IID) const {
300 return false;
301 }
302
303 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
304 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
305 }
306
307 unsigned getAssumedAddrSpace(const Value *V) const {
308 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
309 }
310
311 bool isSingleThreaded() const {
312 return getTLI()->getTargetMachine().Options.ThreadModel ==
314 }
315
316 std::pair<const Value *, unsigned>
318 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
319 }
320
322 Value *NewV) const {
323 return nullptr;
324 }
325
326 bool isLegalAddImmediate(int64_t imm) {
327 return getTLI()->isLegalAddImmediate(imm);
328 }
329
330 bool isLegalICmpImmediate(int64_t imm) {
331 return getTLI()->isLegalICmpImmediate(imm);
332 }
333
334 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
335 bool HasBaseReg, int64_t Scale,
336 unsigned AddrSpace, Instruction *I = nullptr) {
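    // Illustrative query: an access of the form "base + 4*index + 16" would
    // be described as BaseGV=nullptr, BaseOffs=16, HasBaseReg=true, Scale=4,
    // and the target lowering decides whether that combination is natively
    // addressable.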
338 AM.BaseGV = BaseGV;
339 AM.BaseOffs = BaseOffset;
340 AM.HasBaseReg = HasBaseReg;
341 AM.Scale = Scale;
342 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
343 }
344
345 int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) {
346 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);
347 }
348
349 unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
350 Type *ScalarValTy) const {
351 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
352 auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
353 EVT VT = getTLI()->getValueType(DL, SrcTy);
354 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
355 getTLI()->isOperationCustom(ISD::STORE, VT))
356 return true;
357
358 EVT ValVT =
359 getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
360 EVT LegalizedVT =
361 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
362 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
363 };
364 while (VF > 2 && IsSupportedByTarget(VF))
365 VF /= 2;
366 return VF;
367 }
368
370 const DataLayout &DL) const {
371 EVT VT = getTLI()->getValueType(DL, Ty);
372 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
373 }
374
376 const DataLayout &DL) const {
377 EVT VT = getTLI()->getValueType(DL, Ty);
378 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
379 }
380
383 }
384
387 }
388
392 }
393
396 }
397
399 int64_t BaseOffset, bool HasBaseReg,
400 int64_t Scale, unsigned AddrSpace) {
402 AM.BaseGV = BaseGV;
403 AM.BaseOffs = BaseOffset;
404 AM.HasBaseReg = HasBaseReg;
405 AM.Scale = Scale;
406 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
407 return 0;
408 return -1;
409 }
410
411 bool isTruncateFree(Type *Ty1, Type *Ty2) {
412 return getTLI()->isTruncateFree(Ty1, Ty2);
413 }
414
416 return getTLI()->isProfitableToHoist(I);
417 }
418
419 bool useAA() const { return getST()->useAA(); }
420
421 bool isTypeLegal(Type *Ty) {
422 EVT VT = getTLI()->getValueType(DL, Ty);
423 return getTLI()->isTypeLegal(VT);
424 }
425
426 unsigned getRegUsageForType(Type *Ty) {
427 EVT ETy = getTLI()->getValueType(DL, Ty);
428 return getTLI()->getNumRegisters(Ty->getContext(), ETy);
429 }
430
434 return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
435 }
436
438 unsigned &JumpTableSize,
440 BlockFrequencyInfo *BFI) {
441 /// Try to find the estimated number of clusters. Note that the number of
442 /// clusters identified in this function could be different from the actual
443 /// numbers found in lowering. This function ignores switches that are
444 /// lowered with a mix of jump table / bit test / BTree. This function was
445 /// initially intended to be used when estimating the cost of a switch in
446 /// the inline cost heuristic, but it's a generic cost model to be used in
447 /// other places (e.g., in loop unrolling).
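    // Illustrative outcomes (assuming default lowering thresholds): ten
    // contiguous cases sharing a few destinations typically collapse to a
    // single cluster (bit test or jump table), while three widely spread
    // cases such as {1, 1000, 100000} fall back to the per-case count N.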
448 unsigned N = SI.getNumCases();
449 const TargetLoweringBase *TLI = getTLI();
450 const DataLayout &DL = this->getDataLayout();
451
452 JumpTableSize = 0;
453 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
454
455 // Early exit if neither a jump table nor a bit test is allowed.
456 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
457 return N;
458
459 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
460 APInt MinCaseVal = MaxCaseVal;
461 for (auto CI : SI.cases()) {
462 const APInt &CaseVal = CI.getCaseValue()->getValue();
463 if (CaseVal.sgt(MaxCaseVal))
464 MaxCaseVal = CaseVal;
465 if (CaseVal.slt(MinCaseVal))
466 MinCaseVal = CaseVal;
467 }
468
469 // Check if suitable for a bit test
470 if (N <= DL.getIndexSizeInBits(0u)) {
472 for (auto I : SI.cases())
473 Dests.insert(I.getCaseSuccessor());
474
475 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
476 DL))
477 return 1;
478 }
479
480 // Check if suitable for a jump table.
481 if (IsJTAllowed) {
482 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
483 return N;
484 uint64_t Range =
485 (MaxCaseVal - MinCaseVal)
486 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
487 // Check whether a range of clusters is dense enough for a jump table
488 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
489 JumpTableSize = Range;
490 return 1;
491 }
492 }
493 return N;
494 }
495
497 const TargetLoweringBase *TLI = getTLI();
498 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
499 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
500 }
501
503 const TargetMachine &TM = getTLI()->getTargetMachine();
504 // If non-PIC mode, do not generate a relative lookup table.
505 if (!TM.isPositionIndependent())
506 return false;
507
508 /// Relative lookup table entries consist of 32-bit offsets.
509 /// Do not generate relative lookup tables for large code models
510 /// in 64-bit architectures where 32-bit offsets might not be enough.
511 if (TM.getCodeModel() == CodeModel::Medium ||
512 TM.getCodeModel() == CodeModel::Large)
513 return false;
514
515 Triple TargetTriple = TM.getTargetTriple();
516 if (!TargetTriple.isArch64Bit())
517 return false;
518
519 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
520 // there.
521 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
522 return false;
523
524 return true;
525 }
526
527 bool haveFastSqrt(Type *Ty) {
528 const TargetLoweringBase *TLI = getTLI();
529 EVT VT = TLI->getValueType(DL, Ty);
530 return TLI->isTypeLegal(VT) &&
532 }
533
535 return true;
536 }
537
539 // Check whether FADD is available, as a proxy for floating-point in
540 // general.
541 const TargetLoweringBase *TLI = getTLI();
542 EVT VT = TLI->getValueType(DL, Ty);
546 }
547
548 unsigned getInliningThresholdMultiplier() const { return 1; }
549 unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
550 unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const {
551 return 0;
552 }
553
554 int getInlinerVectorBonusPercent() const { return 150; }
555
559 // This unrolling functionality is target independent, but to provide some
560 // motivation for its intended use, for x86:
561
562 // According to the Intel 64 and IA-32 Architectures Optimization Reference
563 // Manual, Intel Core models and later have a loop stream detector (and
564 // associated uop queue) that can benefit from partial unrolling.
565 // The relevant requirements are:
566 // - The loop must have no more than 4 (8 for Nehalem and later) branches
567 // taken, and none of them may be calls.
568 // - The loop can have no more than 18 (28 for Nehalem and later) uops.
569
570 // According to the Software Optimization Guide for AMD Family 15h
571 // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
572 // and loop buffer which can benefit from partial unrolling.
573 // The relevant requirements are:
574 // - The loop must have fewer than 16 branches
575 // - The loop must have less than 40 uops in all executed loop branches
576
577 // The number of taken branches in a loop is hard to estimate here, and
578 // benchmarking has revealed that it is better not to be conservative when
579 // estimating the branch count. As a result, we'll ignore the branch limits
580 // until someone finds a case where it matters in practice.
581
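    // Illustrative effect: on a subtarget whose scheduling model reports
    // LoopMicroOpBufferSize = 28 (and with no PartialUnrollingThreshold
    // override), call-free loops get UP.PartialThreshold = 28 with partial,
    // runtime and upper-bound unrolling enabled below.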
582 unsigned MaxOps;
583 const TargetSubtargetInfo *ST = getST();
584 if (PartialUnrollingThreshold.getNumOccurrences() > 0)
586 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
587 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
588 else
589 return;
590
591 // Scan the loop: don't unroll loops with calls.
592 for (BasicBlock *BB : L->blocks()) {
593 for (Instruction &I : *BB) {
594 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
595 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
596 if (!thisT()->isLoweredToCall(F))
597 continue;
598 }
599
600 if (ORE) {
601 ORE->emit([&]() {
602 return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
603 L->getHeader())
604 << "advising against unrolling the loop because it "
605 "contains a "
606 << ore::NV("Call", &I);
607 });
608 }
609 return;
610 }
611 }
612 }
613
614 // Enable runtime and partial unrolling up to the specified size.
615 // Enable using trip count upper bound to unroll loops.
616 UP.Partial = UP.Runtime = UP.UpperBound = true;
617 UP.PartialThreshold = MaxOps;
618
619 // Avoid unrolling when optimizing for size.
620 UP.OptSizeThreshold = 0;
622
623 // Set the number of instructions optimized when the "back edge"
624 // becomes a "fall through" to its default value of 2.
625 UP.BEInsns = 2;
626 }
627
630 PP.PeelCount = 0;
631 PP.AllowPeeling = true;
632 PP.AllowLoopNestsPeeling = false;
633 PP.PeelProfiledIterations = true;
634 }
635
637 AssumptionCache &AC,
638 TargetLibraryInfo *LibInfo,
639 HardwareLoopInfo &HWLoopInfo) {
640 return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
641 }
642
645 }
646
648 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) {
649 return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
650 }
651
652 std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
653 IntrinsicInst &II) {
654 return BaseT::instCombineIntrinsic(IC, II);
655 }
656
657 std::optional<Value *>
659 APInt DemandedMask, KnownBits &Known,
660 bool &KnownBitsComputed) {
661 return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
662 KnownBitsComputed);
663 }
664
666 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
667 APInt &UndefElts2, APInt &UndefElts3,
668 std::function<void(Instruction *, unsigned, APInt, APInt &)>
669 SimplifyAndSetOp) {
671 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
672 SimplifyAndSetOp);
673 }
674
675 virtual std::optional<unsigned>
677 return std::optional<unsigned>(
678 getST()->getCacheSize(static_cast<unsigned>(Level)));
679 }
680
681 virtual std::optional<unsigned>
683 std::optional<unsigned> TargetResult =
684 getST()->getCacheAssociativity(static_cast<unsigned>(Level));
685
686 if (TargetResult)
687 return TargetResult;
688
689 return BaseT::getCacheAssociativity(Level);
690 }
691
692 virtual unsigned getCacheLineSize() const {
693 return getST()->getCacheLineSize();
694 }
695
696 virtual unsigned getPrefetchDistance() const {
697 return getST()->getPrefetchDistance();
698 }
699
700 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
701 unsigned NumStridedMemAccesses,
702 unsigned NumPrefetches,
703 bool HasCall) const {
704 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
705 NumPrefetches, HasCall);
706 }
707
708 virtual unsigned getMaxPrefetchIterationsAhead() const {
709 return getST()->getMaxPrefetchIterationsAhead();
710 }
711
712 virtual bool enableWritePrefetching() const {
713 return getST()->enableWritePrefetching();
714 }
715
716 virtual bool shouldPrefetchAddressSpace(unsigned AS) const {
717 return getST()->shouldPrefetchAddressSpace(AS);
718 }
719
720 /// @}
721
722 /// \name Vector TTI Implementations
723 /// @{
724
726 return TypeSize::getFixed(32);
727 }
728
729 std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
730 std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
731 bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
732
733 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
734 /// are set if the demanded result elements need to be inserted and/or
735 /// extracted from vectors.
737 const APInt &DemandedElts,
738 bool Insert, bool Extract,
740 /// FIXME: a bitfield is not a reasonable abstraction for talking about
741 /// which elements are needed from a scalable vector
742 if (isa<ScalableVectorType>(InTy))
744 auto *Ty = cast<FixedVectorType>(InTy);
745
746 assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
747 "Vector size mismatch");
748
750
751 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
752 if (!DemandedElts[i])
753 continue;
754 if (Insert)
755 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
756 CostKind, i, nullptr, nullptr);
757 if (Extract)
758 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
759 CostKind, i, nullptr, nullptr);
760 }
761
762 return Cost;
763 }
764
765 /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
767 bool Extract,
769 if (isa<ScalableVectorType>(InTy))
771 auto *Ty = cast<FixedVectorType>(InTy);
772
773 APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
774 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
775 CostKind);
776 }
777
778 /// Estimate the overhead of scalarizing an instruction's unique
779 /// non-constant operands. The (potentially vector) types to use for each
780 /// argument are passed via Tys.
785 assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
786
788 SmallPtrSet<const Value*, 4> UniqueOperands;
789 for (int I = 0, E = Args.size(); I != E; I++) {
790 // Disregard things like metadata arguments.
791 const Value *A = Args[I];
792 Type *Ty = Tys[I];
793 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
794 !Ty->isPtrOrPtrVectorTy())
795 continue;
796
797 if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
798 if (auto *VecTy = dyn_cast<VectorType>(Ty))
799 Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
800 /*Extract*/ true, CostKind);
801 }
802 }
803
804 return Cost;
805 }
806
807 /// Estimate the overhead of scalarizing the inputs and outputs of an
808 /// instruction, with return type RetTy and arguments Args of type Tys. If
809 /// Args are unknown (empty), then the cost associated with one argument is
810 /// added as a heuristic.
816 RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
817 if (!Args.empty())
819 else
820 // When no information on arguments is provided, we add the cost
821 // associated with one argument as a heuristic.
822 Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
823 /*Extract*/ true, CostKind);
824
825 return Cost;
826 }
827
828 /// Estimate the cost of type-legalization and the legalized type.
829 std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
830 LLVMContext &C = Ty->getContext();
831 EVT MTy = getTLI()->getValueType(DL, Ty);
832
834 // We keep legalizing the type until we find a legal kind. We assume that
835 // the only operation that costs anything is the split. After splitting
836 // we need to handle two types.
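    // Worked example (illustrative): on a target with 128-bit vector
    // registers, <8 x i64> is split twice (v8i64 -> v4i64 -> v2i64), doubling
    // the accumulated cost at each step, and the returned MVT is v2i64.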
837 while (true) {
839
841 // Ensure we return a sensible simple VT here, since many callers of
842 // this function require it.
843 MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
844 return std::make_pair(InstructionCost::getInvalid(), VT);
845 }
846
847 if (LK.first == TargetLoweringBase::TypeLegal)
848 return std::make_pair(Cost, MTy.getSimpleVT());
849
850 if (LK.first == TargetLoweringBase::TypeSplitVector ||
852 Cost *= 2;
853
854 // Do not loop with f128 type.
855 if (MTy == LK.second)
856 return std::make_pair(Cost, MTy.getSimpleVT());
857
858 // Keep legalizing the type.
859 MTy = LK.second;
860 }
861 }
862
863 unsigned getMaxInterleaveFactor(ElementCount VF) { return 1; }
864
869 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
870 const Instruction *CxtI = nullptr) {
871 // Check if any of the operands are vector operands.
872 const TargetLoweringBase *TLI = getTLI();
873 int ISD = TLI->InstructionOpcodeToISD(Opcode);
874 assert(ISD && "Invalid opcode");
875
876 // TODO: Handle more cost kinds.
879 Opd1Info, Opd2Info,
880 Args, CxtI);
881
882 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
883
884 bool IsFloat = Ty->isFPOrFPVectorTy();
885 // Assume that floating point arithmetic operations cost twice as much as
886 // integer operations.
887 InstructionCost OpCost = (IsFloat ? 2 : 1);
888
889 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
890 // The operation is legal. Assume it costs 1.
891 // TODO: Once we have extract/insert subvector cost we need to use them.
892 return LT.first * OpCost;
893 }
894
895 if (!TLI->isOperationExpand(ISD, LT.second)) {
896 // If the operation is custom lowered, then assume that the code is twice
897 // as expensive.
898 return LT.first * 2 * OpCost;
899 }
900
901 // An 'Expand' of URem and SRem is special because it may default
902 // to expanding the operation into a sequence of sub-operations
903 // i.e. X % Y -> X-(X/Y)*Y.
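    // Illustrative consequence: on a target with no legal UREM but a legal
    // UDIV, the cost of a urem is modeled below as cost(udiv) + cost(mul) +
    // cost(sub).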
904 if (ISD == ISD::UREM || ISD == ISD::SREM) {
905 bool IsSigned = ISD == ISD::SREM;
906 if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
907 LT.second) ||
908 TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
909 LT.second)) {
910 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
911 InstructionCost DivCost = thisT()->getArithmeticInstrCost(
912 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
913 InstructionCost MulCost =
914 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
915 InstructionCost SubCost =
916 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
917 return DivCost + MulCost + SubCost;
918 }
919 }
920
921 // We cannot scalarize scalable vectors, so return Invalid.
922 if (isa<ScalableVectorType>(Ty))
924
925 // Else, assume that we need to scalarize this op.
926 // TODO: If one of the types get legalized by splitting, handle this
927 // similarly to what getCastInstrCost() does.
928 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
929 InstructionCost Cost = thisT()->getArithmeticInstrCost(
930 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
931 Args, CxtI);
932 // Return the cost of multiple scalar invocations plus the cost of
933 // inserting and extracting the values.
934 SmallVector<Type *> Tys(Args.size(), Ty);
935 return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
936 VTy->getNumElements() * Cost;
937 }
938
939 // We don't know anything about this scalar instruction.
940 return OpCost;
941 }
942
944 ArrayRef<int> Mask,
945 VectorType *Ty, int &Index,
946 VectorType *&SubTy) const {
947 if (Mask.empty())
948 return Kind;
949 int NumSrcElts = Ty->getElementCount().getKnownMinValue();
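    // Illustrative refinements: on a 4-element single-source permute, mask
    // <3,2,1,0> is recognised below as TTI::SK_Reverse and mask <0,0,0,0> as
    // TTI::SK_Broadcast.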
950 switch (Kind) {
952 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts))
953 return TTI::SK_Reverse;
954 if (ShuffleVectorInst::isZeroEltSplatMask(Mask, NumSrcElts))
955 return TTI::SK_Broadcast;
956 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, NumSrcElts, Index) &&
957 (Index + Mask.size()) <= (size_t)NumSrcElts) {
958 SubTy = FixedVectorType::get(Ty->getElementType(), Mask.size());
960 }
961 break;
963 int NumSubElts;
964 if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask(
965 Mask, NumSrcElts, NumSubElts, Index)) {
966 if (Index + NumSubElts > NumSrcElts)
967 return Kind;
968 SubTy = FixedVectorType::get(Ty->getElementType(), NumSubElts);
970 }
971 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts))
972 return TTI::SK_Select;
973 if (ShuffleVectorInst::isTransposeMask(Mask, NumSrcElts))
974 return TTI::SK_Transpose;
975 if (ShuffleVectorInst::isSpliceMask(Mask, NumSrcElts, Index))
976 return TTI::SK_Splice;
977 break;
978 }
979 case TTI::SK_Select:
980 case TTI::SK_Reverse:
985 case TTI::SK_Splice:
986 break;
987 }
988 return Kind;
989 }
990
992 ArrayRef<int> Mask,
994 VectorType *SubTp,
995 ArrayRef<const Value *> Args = std::nullopt) {
996 switch (improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp)) {
998 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
999 return getBroadcastShuffleOverhead(FVT, CostKind);
1001 case TTI::SK_Select:
1002 case TTI::SK_Splice:
1003 case TTI::SK_Reverse:
1004 case TTI::SK_Transpose:
1007 if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
1008 return getPermuteShuffleOverhead(FVT, CostKind);
1011 return getExtractSubvectorOverhead(Tp, CostKind, Index,
1012 cast<FixedVectorType>(SubTp));
1014 return getInsertSubvectorOverhead(Tp, CostKind, Index,
1015 cast<FixedVectorType>(SubTp));
1016 }
1017 llvm_unreachable("Unknown TTI::ShuffleKind");
1018 }
1019
1023 const Instruction *I = nullptr) {
1024 if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
1025 return 0;
1026
1027 const TargetLoweringBase *TLI = getTLI();
1028 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1029 assert(ISD && "Invalid opcode");
1030 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
1031 std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
1032
1033 TypeSize SrcSize = SrcLT.second.getSizeInBits();
1034 TypeSize DstSize = DstLT.second.getSizeInBits();
1035 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
1036 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
1037
1038 switch (Opcode) {
1039 default:
1040 break;
1041 case Instruction::Trunc:
1042 // Check for NOOP conversions.
1043 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
1044 return 0;
1045 [[fallthrough]];
1046 case Instruction::BitCast:
1047 // Bitcasts between types that are legalized to the same type are free, and
1048 // we assume an int to/from ptr of the same size is also free.
1049 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
1050 SrcSize == DstSize)
1051 return 0;
1052 break;
1053 case Instruction::FPExt:
1054 if (I && getTLI()->isExtFree(I))
1055 return 0;
1056 break;
1057 case Instruction::ZExt:
1058 if (TLI->isZExtFree(SrcLT.second, DstLT.second))
1059 return 0;
1060 [[fallthrough]];
1061 case Instruction::SExt:
1062 if (I && getTLI()->isExtFree(I))
1063 return 0;
1064
1065 // If this is a zext/sext of a load, return 0 if the corresponding
1066 // extending load exists on target and the result type is legal.
1067 if (CCH == TTI::CastContextHint::Normal) {
1068 EVT ExtVT = EVT::getEVT(Dst);
1069 EVT LoadVT = EVT::getEVT(Src);
1070 unsigned LType =
1071 ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
1072 if (DstLT.first == SrcLT.first &&
1073 TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
1074 return 0;
1075 }
1076 break;
1077 case Instruction::AddrSpaceCast:
1078 if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
1079 Dst->getPointerAddressSpace()))
1080 return 0;
1081 break;
1082 }
1083
1084 auto *SrcVTy = dyn_cast<VectorType>(Src);
1085 auto *DstVTy = dyn_cast<VectorType>(Dst);
1086
1087 // If the cast is marked as legal (or promote) then assume low cost.
1088 if (SrcLT.first == DstLT.first &&
1089 TLI->isOperationLegalOrPromote(ISD, DstLT.second))
1090 return SrcLT.first;
1091
1092 // Handle scalar conversions.
1093 if (!SrcVTy && !DstVTy) {
1094 // Just check the op cost. If the operation is legal then assume it
1095 // costs 1.
1096 if (!TLI->isOperationExpand(ISD, DstLT.second))
1097 return 1;
1098
1099 // Assume that illegal scalar instructions are expensive.
1100 return 4;
1101 }
1102
1103 // Check vector-to-vector casts.
1104 if (DstVTy && SrcVTy) {
1105 // If the cast is between same-sized registers, then the check is simple.
1106 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
1107
1108 // Assume that Zext is done using AND.
1109 if (Opcode == Instruction::ZExt)
1110 return SrcLT.first;
1111
1112 // Assume that sext is done using SHL and SRA.
1113 if (Opcode == Instruction::SExt)
1114 return SrcLT.first * 2;
1115
1116 // Just check the op cost. If the operation is legal then assume it
1117 // costs 1 and multiply by the type-legalization overhead.
1118
1119 if (!TLI->isOperationExpand(ISD, DstLT.second))
1120 return SrcLT.first * 1;
1121 }
1122
1123 // If we are legalizing by splitting, query the concrete TTI for the cost
1124 // of casting the original vector twice. We also need to factor in the
1125 // cost of the split itself. Count that as 1, to be consistent with
1126 // getTypeLegalizationCost().
1127 bool SplitSrc =
1128 TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
1130 bool SplitDst =
1131 TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
1133 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
1134 DstVTy->getElementCount().isVector()) {
1135 Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
1136 Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
1137 T *TTI = static_cast<T *>(this);
1138 // If both types need to be split then the split is free.
1139 InstructionCost SplitCost =
1140 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
1141 return SplitCost +
1142 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
1143 CostKind, I));
1144 }
1145
1146 // Scalarization cost is Invalid, can't assume any num elements.
1147 if (isa<ScalableVectorType>(DstVTy))
1149
1150 // In other cases where the source or destination are illegal, assume
1151 // the operation will get scalarized.
1152 unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
1153 InstructionCost Cost = thisT()->getCastInstrCost(
1154 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
1155
1156 // Return the cost of multiple scalar invocations plus the cost of
1157 // inserting and extracting the values.
1158 return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
1159 CostKind) +
1160 Num * Cost;
1161 }
1162
1163 // We already handled vector-to-vector and scalar-to-scalar conversions.
1164 // This is where we handle bitcasts between vectors and scalars. We need
1165 // to assume that the conversion is scalarized in one way or another.
1166
1167 if (Opcode == Instruction::BitCast) {
1168 // Illegal bitcasts are done by storing and loading from a stack slot.
1169 return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
1170 /*Extract*/ true, CostKind)
1171 : 0) +
1172 (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
1173 /*Extract*/ false, CostKind)
1174 : 0);
1175 }
1176
1177 llvm_unreachable("Unhandled cast");
1178 }
1179
1181 VectorType *VecTy, unsigned Index) {
1183 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1184 CostKind, Index, nullptr, nullptr) +
1185 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
1187 }
1188
1190 const Instruction *I = nullptr) {
1192 }
1193
1195 CmpInst::Predicate VecPred,
1197 const Instruction *I = nullptr) {
1198 const TargetLoweringBase *TLI = getTLI();
1199 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1200 assert(ISD && "Invalid opcode");
1201
1202 // TODO: Handle other cost kinds.
1204 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1205 I);
1206
1207 // Selects on vectors are actually vector selects.
1208 if (ISD == ISD::SELECT) {
1209 assert(CondTy && "CondTy must exist");
1210 if (CondTy->isVectorTy())
1211 ISD = ISD::VSELECT;
1212 }
1213 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1214
1215 if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
1216 !TLI->isOperationExpand(ISD, LT.second)) {
1217 // The operation is legal. Assume it costs 1. Multiply
1218 // by the type-legalization overhead.
1219 return LT.first * 1;
1220 }
1221
1222 // Otherwise, assume that the cast is scalarized.
1223 // TODO: If one of the types get legalized by splitting, handle this
1224 // similarly to what getCastInstrCost() does.
1225 if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
1226 if (isa<ScalableVectorType>(ValTy))
1228
1229 unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
1230 if (CondTy)
1231 CondTy = CondTy->getScalarType();
1232 InstructionCost Cost = thisT()->getCmpSelInstrCost(
1233 Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
1234
1235 // Return the cost of multiple scalar invocations plus the cost of
1236 // inserting and extracting the values.
1237 return getScalarizationOverhead(ValVTy, /*Insert*/ true,
1238 /*Extract*/ false, CostKind) +
1239 Num * Cost;
1240 }
1241
1242 // Unknown scalar opcode.
1243 return 1;
1244 }
1245
1248 unsigned Index, Value *Op0, Value *Op1) {
1249 return getRegUsageForType(Val->getScalarType());
1250 }
1251
1254 unsigned Index) {
1255 Value *Op0 = nullptr;
1256 Value *Op1 = nullptr;
1257 if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
1258 Op0 = IE->getOperand(0);
1259 Op1 = IE->getOperand(1);
1260 }
1261 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
1262 Op1);
1263 }
1264
1265 InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
1266 int VF,
1267 const APInt &DemandedDstElts,
1269 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
1270 "Unexpected size of DemandedDstElts.");
1271
1273
1274 auto *SrcVT = FixedVectorType::get(EltTy, VF);
1275 auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
1276
1277 // The Mask shuffling cost is to extract all the elements of the Mask
1278 // and insert each of them Factor times into the wide vector:
1279 //
1280 // E.g. an interleaved group with factor 3:
1281 // %mask = icmp ult <8 x i32> %vec1, %vec2
1282 // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
1283 // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1284 // The cost is estimated as extracting all mask elements from the <8 x i1>
1285 // mask vector and inserting them Factor times into the <24 x i1> shuffled
1286 // mask vector.
1287 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
1288 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
1289 /*Insert*/ false,
1290 /*Extract*/ true, CostKind);
1291 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
1292 /*Insert*/ true,
1293 /*Extract*/ false, CostKind);
1294
1295 return Cost;
1296 }
1297
1299 getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
1302 const Instruction *I = nullptr) {
1303 assert(!Src->isVoidTy() && "Invalid type");
1304 // Assume types, such as structs, are expensive.
1305 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
1306 return 4;
1307 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1308
1309 // Assuming that all loads of legal types cost 1.
1310 InstructionCost Cost = LT.first;
1312 return Cost;
1313
1314 const DataLayout &DL = this->getDataLayout();
1315 if (Src->isVectorTy() &&
1316 // In practice it's not currently possible to have a change in lane
1317 // length for extending loads or truncating stores so both types should
1318 // have the same scalable property.
1320 LT.second.getSizeInBits())) {
1321 // This is a vector load that legalizes to a larger type than the vector
1322 // itself. Unless the corresponding extending load or truncating store is
1323 // legal, then this will scalarize.
1325 EVT MemVT = getTLI()->getValueType(DL, Src);
1326 if (Opcode == Instruction::Store)
1327 LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
1328 else
1329 LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
1330
1331 if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
1332 // This is a vector load/store for some illegal type that is scalarized.
1333 // We must account for the cost of building or decomposing the vector.
1335 cast<VectorType>(Src), Opcode != Instruction::Store,
1336 Opcode == Instruction::Store, CostKind);
1337 }
1338 }
1339
1340 return Cost;
1341 }
1342
1344 Align Alignment, unsigned AddressSpace,
1346 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
1347 CostKind);
1348 }
1349
1351 const Value *Ptr, bool VariableMask,
1352 Align Alignment,
1354 const Instruction *I = nullptr) {
1355 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
1356 true, CostKind);
1357 }
1358
1360 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1361 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1362 bool UseMaskForCond = false, bool UseMaskForGaps = false) {
1363
1364 // We cannot scalarize scalable vectors, so return Invalid.
1365 if (isa<ScalableVectorType>(VecTy))
1367
1368 auto *VT = cast<FixedVectorType>(VecTy);
1369
1370 unsigned NumElts = VT->getNumElements();
1371 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
1372
1373 unsigned NumSubElts = NumElts / Factor;
1374 auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1375
1376 // First, the cost of the load/store operation.
1378 if (UseMaskForCond || UseMaskForGaps)
1379 Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
1381 else
1382 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
1383 CostKind);
1384
1385 // Legalize the vector type, and get the legalized and unlegalized type
1386 // sizes.
1387 MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
1388 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
1389 unsigned VecTyLTSize = VecTyLT.getStoreSize();
1390
1391 // Scale the cost of the memory operation by the fraction of legalized
1392 // instructions that will actually be used. We shouldn't account for the
1393 // cost of dead instructions since they will be removed.
1394 //
1395 // E.g., An interleaved load of factor 8:
1396 // %vec = load <16 x i64>, <16 x i64>* %ptr
1397 // %v0 = shufflevector %vec, undef, <0, 8>
1398 //
1399 // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
1400 // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
1401 // type). The other loads are unused.
1402 //
1403 // TODO: Note that legalization can turn masked loads/stores into unmasked
1404 // (legalized) loads/stores. This can be reflected in the cost.
1405 if (Cost.isValid() && VecTySize > VecTyLTSize) {
1406 // The number of loads of a legal type it will take to represent a load
1407 // of the unlegalized vector type.
1408 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
1409
1410 // The number of elements of the unlegalized type that correspond to a
1411 // single legal instruction.
1412 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
1413
1414 // Determine which legal instructions will be used.
1415 BitVector UsedInsts(NumLegalInsts, false);
1416 for (unsigned Index : Indices)
1417 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
1418 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
1419
1420 // Scale the cost of the load by the fraction of legal instructions that
1421 // will be used.
1422 Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
1423 }
1424
1425 // Then add the cost of the interleave operation.
1426 assert(Indices.size() <= Factor &&
1427 "Interleaved memory op has too many members");
1428
1429 const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
1430 const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
1431
1432 APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
1433 for (unsigned Index : Indices) {
1434 assert(Index < Factor && "Invalid index for interleaved memory op");
1435 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
1436 DemandedLoadStoreElts.setBit(Index + Elm * Factor);
1437 }
1438
1439 if (Opcode == Instruction::Load) {
1440 // The interleave cost is similar to extracting the sub vectors' elements
1441 // from the wide vector and inserting them into the sub vectors.
1442 //
1443 // E.g. An interleaved load of factor 2 (with one member of index 0):
1444 // %vec = load <8 x i32>, <8 x i32>* %ptr
1445 // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1446 // The cost is estimated as extracting elements 0, 2, 4 and 6 from the
1447 // <8 x i32> vector and inserting them into a <4 x i32> vector.
1448 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
1449 SubVT, DemandedAllSubElts,
1450 /*Insert*/ true, /*Extract*/ false, CostKind);
1451 Cost += Indices.size() * InsSubCost;
1452 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1453 /*Insert*/ false,
1454 /*Extract*/ true, CostKind);
1455 } else {
1456 // The interleave cost is extracting elements from the sub vectors and
1457 // inserting them into the wide vector.
1458 //
1459 // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
1460 // (using VF=4):
1461 // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
1462 // %gaps.mask = <true, true, false, true, true, false,
1463 // true, true, false, true, true, false>
1464 // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
1465 // i32 Align, <12 x i1> %gaps.mask
1466 // The cost is estimated as extracting all elements (of actual members,
1467 // excluding gaps) from both <4 x i32> vectors and inserting them into the
1468 // <12 x i32> vector.
1469 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
1470 SubVT, DemandedAllSubElts,
1471 /*Insert*/ false, /*Extract*/ true, CostKind);
1472 Cost += ExtSubCost * Indices.size();
1473 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
1474 /*Insert*/ true,
1475 /*Extract*/ false, CostKind);
1476 }
1477
1478 if (!UseMaskForCond)
1479 return Cost;
1480
1481 Type *I8Type = Type::getInt8Ty(VT->getContext());
1482
1483 Cost += thisT()->getReplicationShuffleCost(
1484 I8Type, Factor, NumSubElts,
1485 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
1486 CostKind);
1487
1488 // The Gaps mask is invariant and created outside the loop, therefore the
1489 // cost of creating it is not accounted for here. However if we have both
1490 // a MaskForGaps and some other mask that guards the execution of the
1491 // memory access, we need to account for the cost of And-ing the two masks
1492 // inside the loop.
1493 if (UseMaskForGaps) {
1494 auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
1495 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
1496 CostKind);
1497 }
1498
1499 return Cost;
1500 }
1501
1502 /// Get intrinsic cost based on arguments.
1505 // Check for generically free intrinsics.
1507 return 0;
1508
1509 // Assume that target intrinsics are cheap.
1510 Intrinsic::ID IID = ICA.getID();
1513
1514 if (ICA.isTypeBasedOnly())
1516
1517 Type *RetTy = ICA.getReturnType();
1518
1519 ElementCount RetVF =
1520 (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
1522 const IntrinsicInst *I = ICA.getInst();
1523 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
1524 FastMathFlags FMF = ICA.getFlags();
1525 switch (IID) {
1526 default:
1527 break;
1528
1529 case Intrinsic::powi:
1530 if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
1531 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
1532 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
1533 ShouldOptForSize)) {
1534 // The cost is modeled on the expansion performed by ExpandPowI in
1535 // SelectionDAGBuilder.
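        // Worked example (illustrative): powi(x, 13) has Exponent = 13
        // (0b1101), so ActiveBits = 4 and PopCount = 3, giving an estimate of
        // (4 + 3 - 2) = 5 FMul costs (square-and-multiply).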
1536 APInt Exponent = RHSC->getValue().abs();
1537 unsigned ActiveBits = Exponent.getActiveBits();
1538 unsigned PopCount = Exponent.popcount();
1539 InstructionCost Cost = (ActiveBits + PopCount - 2) *
1540 thisT()->getArithmeticInstrCost(
1541 Instruction::FMul, RetTy, CostKind);
1542 if (RHSC->isNegative())
1543 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
1544 CostKind);
1545 return Cost;
1546 }
1547 }
1548 break;
1549 case Intrinsic::cttz:
1550 // FIXME: If necessary, this should go in target-specific overrides.
1551 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
1553 break;
1554
1555 case Intrinsic::ctlz:
1556 // FIXME: If necessary, this should go in target-specific overrides.
1557 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
1559 break;
1560
1561 case Intrinsic::memcpy:
1562 return thisT()->getMemcpyCost(ICA.getInst());
1563
1564 case Intrinsic::masked_scatter: {
1565 const Value *Mask = Args[3];
1566 bool VarMask = !isa<Constant>(Mask);
1567 Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
1568 return thisT()->getGatherScatterOpCost(Instruction::Store,
1569 ICA.getArgTypes()[0], Args[1],
1570 VarMask, Alignment, CostKind, I);
1571 }
1572 case Intrinsic::masked_gather: {
1573 const Value *Mask = Args[2];
1574 bool VarMask = !isa<Constant>(Mask);
1575 Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
1576 return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
1577 VarMask, Alignment, CostKind, I);
1578 }
1579 case Intrinsic::experimental_stepvector: {
1580 if (isa<ScalableVectorType>(RetTy))
1582 // The cost of materialising a constant integer vector.
1584 }
1585 case Intrinsic::vector_extract: {
1586 // FIXME: Handle case where a scalable vector is extracted from a scalable
1587 // vector
1588 if (isa<ScalableVectorType>(RetTy))
1590 unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
1591 return thisT()->getShuffleCost(
1592 TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
1593 std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
1594 }
1595 case Intrinsic::vector_insert: {
1596 // FIXME: Handle case where a scalable vector is inserted into a scalable
1597 // vector
1598 if (isa<ScalableVectorType>(Args[1]->getType()))
1600 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1601 return thisT()->getShuffleCost(
1602 TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
1603 std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
1604 }
1605 case Intrinsic::experimental_vector_reverse: {
1606 return thisT()->getShuffleCost(
1607 TTI::SK_Reverse, cast<VectorType>(Args[0]->getType()), std::nullopt,
1608 CostKind, 0, cast<VectorType>(RetTy));
1609 }
1610 case Intrinsic::experimental_vector_splice: {
1611 unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
1612 return thisT()->getShuffleCost(
1613 TTI::SK_Splice, cast<VectorType>(Args[0]->getType()), std::nullopt,
1614 CostKind, Index, cast<VectorType>(RetTy));
1615 }
1616 case Intrinsic::vector_reduce_add:
1617 case Intrinsic::vector_reduce_mul:
1618 case Intrinsic::vector_reduce_and:
1619 case Intrinsic::vector_reduce_or:
1620 case Intrinsic::vector_reduce_xor:
1621 case Intrinsic::vector_reduce_smax:
1622 case Intrinsic::vector_reduce_smin:
1623 case Intrinsic::vector_reduce_fmax:
1624 case Intrinsic::vector_reduce_fmin:
1625 case Intrinsic::vector_reduce_fmaximum:
1626 case Intrinsic::vector_reduce_fminimum:
1627 case Intrinsic::vector_reduce_umax:
1628 case Intrinsic::vector_reduce_umin: {
1629 IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
1631 }
1632 case Intrinsic::vector_reduce_fadd:
1633 case Intrinsic::vector_reduce_fmul: {
1635 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
1637 }
1638 case Intrinsic::fshl:
1639 case Intrinsic::fshr: {
1640 const Value *X = Args[0];
1641 const Value *Y = Args[1];
1642 const Value *Z = Args[2];
1645 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
1646 const TTI::OperandValueInfo OpInfoBW =
1648 isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
1649 : TTI::OP_None};
1650
1651 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
1652 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
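      // Illustrative tally: a funnel shift is costed below as Or + Sub + Shl
      // + LShr, plus a URem when the shift amount is not constant, plus an
      // ICmp/Select pair when X != Y (i.e. it is not a rotate).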
1654 Cost +=
1655 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
1656 Cost +=
1657 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
1658 Cost += thisT()->getArithmeticInstrCost(
1659 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
1660 {OpInfoZ.Kind, TTI::OP_None});
1661 Cost += thisT()->getArithmeticInstrCost(
1662 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
1663 {OpInfoZ.Kind, TTI::OP_None});
1664 // Non-constant shift amounts require a modulo.
1665 if (!OpInfoZ.isConstant())
1666 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
1667 CostKind, OpInfoZ, OpInfoBW);
1668 // For non-rotates (X != Y) we must add shift-by-zero handling costs.
1669 if (X != Y) {
1670 Type *CondTy = RetTy->getWithNewBitWidth(1);
1671 Cost +=
1672 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
1674 Cost +=
1675 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
1677 }
1678 return Cost;
1679 }
1680 case Intrinsic::get_active_lane_mask: {
1681 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
1682 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
1683
1684 // If we're not expanding the intrinsic then we assume this is cheap
1685 // to implement.
1686 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
1687 return getTypeLegalizationCost(RetTy).first;
1688 }
1689
1690 // Create the expanded types that will be used to calculate the uadd_sat
1691 // operation.
1692 Type *ExpRetTy = VectorType::get(
1693 ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
1694 IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
1696 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1697 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
1699 return Cost;
1700 }
1701 }
1702
1703 // VP Intrinsics should have the same cost as their non-vp counterparts.
1704 // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
1705 // counterpart when the vector length argument is smaller than the maximum
1706 // vector length.
1707 // TODO: Support other kinds of VPIntrinsics
1708 if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
1709 std::optional<unsigned> FOp =
1711 if (FOp) {
1712 if (ICA.getID() == Intrinsic::vp_load) {
1713 Align Alignment;
1714 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1715 Alignment = VPI->getPointerAlignment().valueOrOne();
1716 unsigned AS = 0;
1717 if (ICA.getArgs().size() > 1)
1718 if (auto *PtrTy =
1719 dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
1720 AS = PtrTy->getAddressSpace();
1721 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
1722 AS, CostKind);
1723 }
1724 if (ICA.getID() == Intrinsic::vp_store) {
1725 Align Alignment;
1726 if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
1727 Alignment = VPI->getPointerAlignment().valueOrOne();
1728 unsigned AS = 0;
1729 if (ICA.getArgs().size() >= 2)
1730 if (auto *PtrTy =
1731 dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
1732 AS = PtrTy->getAddressSpace();
1733 return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
1734 AS, CostKind);
1735 }
1737 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
1738 CostKind);
1739 }
1740 }
1741
1742 std::optional<Intrinsic::ID> FID =
1744 if (FID) {
1745 // The non-vp version will have the same Args/Tys, except for the mask and vector length.
1746 assert(ICA.getArgs().size() >= 2 && ICA.getArgTypes().size() >= 2 &&
1747 "Expected VPIntrinsic to have Mask and Vector Length args and "
1748 "types");
1750
1751 // VPReduction intrinsics have a start value argument that their non-vp
1752 // counterparts do not have, except for the fadd and fmul non-vp
1753 // counterparts.
1755 *FID != Intrinsic::vector_reduce_fadd &&
1756 *FID != Intrinsic::vector_reduce_fmul)
1757 NewTys = NewTys.drop_front();
1758
1759 IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
1760 ICA.getFlags());
1761 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
1762 }
1763 }
1764
1765 // Assume that we need to scalarize this intrinsic.
1766 // Compute the scalarization overhead based on Args for a vector
1767 // intrinsic.
1768 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
1769 if (RetVF.isVector() && !RetVF.isScalable()) {
1770 ScalarizationCost = 0;
1771 if (!RetTy->isVoidTy())
1772 ScalarizationCost += getScalarizationOverhead(
1773 cast<VectorType>(RetTy),
1774 /*Insert*/ true, /*Extract*/ false, CostKind);
1775 ScalarizationCost +=
1776 getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
1777 }
1778
1779 IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
1780 ScalarizationCost);
1781 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
1782 }
1783
1784 /// Get intrinsic cost based on argument types.
1785 /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
1786 /// cost of scalarizing the arguments and the return value will be computed
1787 /// based on types.
1788 InstructionCost
1789 getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1790 TTI::TargetCostKind CostKind) {
1791 Intrinsic::ID IID = ICA.getID();
1792 Type *RetTy = ICA.getReturnType();
1793 const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
1794 FastMathFlags FMF = ICA.getFlags();
1795 InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
1796 bool SkipScalarizationCost = ICA.skipScalarizationCost();
1797
1798 VectorType *VecOpTy = nullptr;
1799 if (!Tys.empty()) {
1800 // The vector reduction operand is operand 0 except for fadd/fmul.
1801 // Their operand 0 is a scalar start value, so the vector op is operand 1.
1802 unsigned VecTyIndex = 0;
1803 if (IID == Intrinsic::vector_reduce_fadd ||
1804 IID == Intrinsic::vector_reduce_fmul)
1805 VecTyIndex = 1;
1806 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
1807 VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
1808 }
1809
1810 // Library call cost - other than size, make it expensive.
1811 unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
1812 unsigned ISD = 0;
1813 switch (IID) {
1814 default: {
1815 // Scalable vectors cannot be scalarized, so return Invalid.
1816 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
1817 return isa<ScalableVectorType>(Ty);
1818 }))
1819 return InstructionCost::getInvalid();
1820
1821 // Assume that we need to scalarize this intrinsic.
1822 InstructionCost ScalarizationCost =
1823 SkipScalarizationCost ? ScalarizationCostPassed : 0;
1824 unsigned ScalarCalls = 1;
1825 Type *ScalarRetTy = RetTy;
1826 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
1827 if (!SkipScalarizationCost)
1828 ScalarizationCost = getScalarizationOverhead(
1829 RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
1830 ScalarCalls = std::max(ScalarCalls,
1831 cast<FixedVectorType>(RetVTy)->getNumElements());
1832 ScalarRetTy = RetTy->getScalarType();
1833 }
1834 SmallVector<Type *, 4> ScalarTys;
1835 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
1836 Type *Ty = Tys[i];
1837 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1838 if (!SkipScalarizationCost)
1839 ScalarizationCost += getScalarizationOverhead(
1840 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
1841 ScalarCalls = std::max(ScalarCalls,
1842 cast<FixedVectorType>(VTy)->getNumElements());
1843 Ty = Ty->getScalarType();
1844 }
1845 ScalarTys.push_back(Ty);
1846 }
1847 if (ScalarCalls == 1)
1848 return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
1849
1850 IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
1851 InstructionCost ScalarCost =
1852 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
1853
1854 return ScalarCalls * ScalarCost + ScalarizationCost;
1855 }
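// Editorial example of the scalarization estimate above, assuming a
// hypothetical target with no vector sine support: costing
//   %r = call <4 x float> @llvm.sin.v4f32(<4 x float> %x)
// gives ScalarCalls = 4 and ScalarCost = the cost of one scalar
// @llvm.sin.f32 call, so the estimate is 4 * ScalarCost plus the overhead of
// extracting the four operand lanes and inserting the four results.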
1856 // Look for intrinsics that can be lowered directly or turned into a scalar
1857 // intrinsic call.
1858 case Intrinsic::sqrt:
1859 ISD = ISD::FSQRT;
1860 break;
1861 case Intrinsic::sin:
1862 ISD = ISD::FSIN;
1863 break;
1864 case Intrinsic::cos:
1865 ISD = ISD::FCOS;
1866 break;
1867 case Intrinsic::exp:
1868 ISD = ISD::FEXP;
1869 break;
1870 case Intrinsic::exp2:
1871 ISD = ISD::FEXP2;
1872 break;
1873 case Intrinsic::exp10:
1874 ISD = ISD::FEXP10;
1875 break;
1876 case Intrinsic::log:
1877 ISD = ISD::FLOG;
1878 break;
1879 case Intrinsic::log10:
1880 ISD = ISD::FLOG10;
1881 break;
1882 case Intrinsic::log2:
1883 ISD = ISD::FLOG2;
1884 break;
1885 case Intrinsic::fabs:
1886 ISD = ISD::FABS;
1887 break;
1888 case Intrinsic::canonicalize:
1889 ISD = ISD::FCANONICALIZE;
1890 break;
1891 case Intrinsic::minnum:
1892 ISD = ISD::FMINNUM;
1893 break;
1894 case Intrinsic::maxnum:
1895 ISD = ISD::FMAXNUM;
1896 break;
1897 case Intrinsic::minimum:
1898 ISD = ISD::FMINIMUM;
1899 break;
1900 case Intrinsic::maximum:
1901 ISD = ISD::FMAXIMUM;
1902 break;
1903 case Intrinsic::copysign:
1904 ISD = ISD::FCOPYSIGN;
1905 break;
1906 case Intrinsic::floor:
1907 ISD = ISD::FFLOOR;
1908 break;
1909 case Intrinsic::ceil:
1910 ISD = ISD::FCEIL;
1911 break;
1912 case Intrinsic::trunc:
1913 ISD = ISD::FTRUNC;
1914 break;
1915 case Intrinsic::nearbyint:
1916 ISD = ISD::FNEARBYINT;
1917 break;
1918 case Intrinsic::rint:
1919 ISD = ISD::FRINT;
1920 break;
1921 case Intrinsic::lrint:
1922 ISD = ISD::LRINT;
1923 break;
1924 case Intrinsic::llrint:
1925 ISD = ISD::LLRINT;
1926 break;
1927 case Intrinsic::round:
1928 ISD = ISD::FROUND;
1929 break;
1930 case Intrinsic::roundeven:
1931 ISD = ISD::FROUNDEVEN;
1932 break;
1933 case Intrinsic::pow:
1934 ISD = ISD::FPOW;
1935 break;
1936 case Intrinsic::fma:
1937 ISD = ISD::FMA;
1938 break;
1939 case Intrinsic::fmuladd:
1940 ISD = ISD::FMA;
1941 break;
1942 case Intrinsic::experimental_constrained_fmuladd:
1943 ISD = ISD::STRICT_FMA;
1944 break;
1945 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
1946 case Intrinsic::lifetime_start:
1947 case Intrinsic::lifetime_end:
1948 case Intrinsic::sideeffect:
1949 case Intrinsic::pseudoprobe:
1950 case Intrinsic::arithmetic_fence:
1951 return 0;
1952 case Intrinsic::masked_store: {
1953 Type *Ty = Tys[0];
1954 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
1955 return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
1956 CostKind);
1957 }
1958 case Intrinsic::masked_load: {
1959 Type *Ty = RetTy;
1960 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
1961 return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
1962 CostKind);
1963 }
1964 case Intrinsic::vector_reduce_add:
1965 return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
1966 std::nullopt, CostKind);
1967 case Intrinsic::vector_reduce_mul:
1968 return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
1969 std::nullopt, CostKind);
1970 case Intrinsic::vector_reduce_and:
1971 return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
1972 std::nullopt, CostKind);
1973 case Intrinsic::vector_reduce_or:
1974 return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
1975 std::nullopt, CostKind);
1976 case Intrinsic::vector_reduce_xor:
1977 return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
1978 std::nullopt, CostKind);
1979 case Intrinsic::vector_reduce_fadd:
1980 return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
1981 FMF, CostKind);
1982 case Intrinsic::vector_reduce_fmul:
1983 return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
1984 FMF, CostKind);
1985 case Intrinsic::vector_reduce_smax:
1986 return thisT()->getMinMaxReductionCost(Intrinsic::smax, VecOpTy,
1987 ICA.getFlags(), CostKind);
1988 case Intrinsic::vector_reduce_smin:
1989 return thisT()->getMinMaxReductionCost(Intrinsic::smin, VecOpTy,
1990 ICA.getFlags(), CostKind);
1991 case Intrinsic::vector_reduce_umax:
1992 return thisT()->getMinMaxReductionCost(Intrinsic::umax, VecOpTy,
1993 ICA.getFlags(), CostKind);
1994 case Intrinsic::vector_reduce_umin:
1995 return thisT()->getMinMaxReductionCost(Intrinsic::umin, VecOpTy,
1996 ICA.getFlags(), CostKind);
1997 case Intrinsic::vector_reduce_fmax:
1998 return thisT()->getMinMaxReductionCost(Intrinsic::maxnum, VecOpTy,
1999 ICA.getFlags(), CostKind);
2000 case Intrinsic::vector_reduce_fmin:
2001 return thisT()->getMinMaxReductionCost(Intrinsic::minnum, VecOpTy,
2002 ICA.getFlags(), CostKind);
2003 case Intrinsic::vector_reduce_fmaximum:
2004 return thisT()->getMinMaxReductionCost(Intrinsic::maximum, VecOpTy,
2005 ICA.getFlags(), CostKind);
2006 case Intrinsic::vector_reduce_fminimum:
2007 return thisT()->getMinMaxReductionCost(Intrinsic::minimum, VecOpTy,
2008 ICA.getFlags(), CostKind);
2009 case Intrinsic::abs: {
2010 // abs(X) = select(icmp(X,0),X,sub(0,X))
2011 Type *CondTy = RetTy->getWithNewBitWidth(1);
2012 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2013 InstructionCost Cost = 0;
2014 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2015 Pred, CostKind);
2016 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2017 Pred, CostKind);
2018 // TODO: Should we add an OperandValueProperties::OP_Zero property?
2019 Cost += thisT()->getArithmeticInstrCost(
2020 BinaryOperator::Sub, RetTy, CostKind, {TTI::OK_UniformConstantValue, TTI::OP_None});
2021 return Cost;
2022 }
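// Editorial example: for a scalar i32 @llvm.abs on a target where icmp,
// select and sub are each basic instructions, the decomposition above costs
// roughly one icmp sgt, one select and one sub, i.e. about 3 * TCC_Basic.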
2023 case Intrinsic::smax:
2024 case Intrinsic::smin:
2025 case Intrinsic::umax:
2026 case Intrinsic::umin: {
2027 // minmax(X,Y) = select(icmp(X,Y),X,Y)
2028 Type *CondTy = RetTy->getWithNewBitWidth(1);
2029 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
2030 CmpInst::Predicate Pred =
2031 IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
2032 InstructionCost Cost = 0;
2033 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2034 Pred, CostKind);
2035 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2036 Pred, CostKind);
2037 return Cost;
2038 }
2039 case Intrinsic::sadd_sat:
2040 case Intrinsic::ssub_sat: {
2041 Type *CondTy = RetTy->getWithNewBitWidth(1);
2042
2043 Type *OpTy = StructType::create({RetTy, CondTy});
2044 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
2045 ? Intrinsic::sadd_with_overflow
2046 : Intrinsic::ssub_with_overflow;
2047 CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
2048
2049 // SatMax -> Overflow && SumDiff < 0
2050 // SatMin -> Overflow && SumDiff >= 0
2051 InstructionCost Cost = 0;
2052 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2053 nullptr, ScalarizationCostPassed);
2054 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2055 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
2056 Pred, CostKind);
2057 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
2058 CondTy, Pred, CostKind);
2059 return Cost;
2060 }
2061 case Intrinsic::uadd_sat:
2062 case Intrinsic::usub_sat: {
2063 Type *CondTy = RetTy->getWithNewBitWidth(1);
2064
2065 Type *OpTy = StructType::create({RetTy, CondTy});
2066 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
2067 ? Intrinsic::uadd_with_overflow
2068 : Intrinsic::usub_with_overflow;
2069
2070 InstructionCost Cost = 0;
2071 IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
2072 nullptr, ScalarizationCostPassed);
2073 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2074 Cost +=
2075 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2076 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2077 return Cost;
2078 }
2079 case Intrinsic::smul_fix:
2080 case Intrinsic::umul_fix: {
2081 unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
2082 Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
2083
2084 unsigned ExtOp =
2085 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
2086 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2087
2088 InstructionCost Cost = 0;
2089 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
2090 Cost +=
2091 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2092 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
2093 CCH, CostKind);
2094 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
2095 CostKind,
2096 {TTI::OK_AnyValue, TTI::OP_None},
2097 {TTI::OK_UniformConstantValue, TTI::OP_None});
2098 Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
2099 {TTI::OK_AnyValue, TTI::OP_None},
2100 {TTI::OK_UniformConstantValue, TTI::OP_None});
2101 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
2102 return Cost;
2103 }
2104 case Intrinsic::sadd_with_overflow:
2105 case Intrinsic::ssub_with_overflow: {
2106 Type *SumTy = RetTy->getContainedType(0);
2107 Type *OverflowTy = RetTy->getContainedType(1);
2108 unsigned Opcode = IID == Intrinsic::sadd_with_overflow
2109 ? BinaryOperator::Add
2110 : BinaryOperator::Sub;
2111
2112 // Add:
2113 // Overflow -> (Result < LHS) ^ (RHS < 0)
2114 // Sub:
2115 // Overflow -> (Result < LHS) ^ (RHS > 0)
2116 InstructionCost Cost = 0;
2117 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2118 Cost += 2 * thisT()->getCmpSelInstrCost(
2119 Instruction::ICmp, SumTy, OverflowTy,
2120 CmpInst::ICMP_SGT, CostKind);
2121 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
2122 CostKind);
2123 return Cost;
2124 }
2125 case Intrinsic::uadd_with_overflow:
2126 case Intrinsic::usub_with_overflow: {
2127 Type *SumTy = RetTy->getContainedType(0);
2128 Type *OverflowTy = RetTy->getContainedType(1);
2129 unsigned Opcode = IID == Intrinsic::uadd_with_overflow
2130 ? BinaryOperator::Add
2131 : BinaryOperator::Sub;
2132 CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
2133 ? CmpInst::ICMP_ULT
2134 : CmpInst::ICMP_UGT;
2135
2136 InstructionCost Cost = 0;
2137 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
2138 Cost +=
2139 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
2140 Pred, CostKind);
2141 return Cost;
2142 }
2143 case Intrinsic::smul_with_overflow:
2144 case Intrinsic::umul_with_overflow: {
2145 Type *MulTy = RetTy->getContainedType(0);
2146 Type *OverflowTy = RetTy->getContainedType(1);
2147 unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
2148 Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
2149 bool IsSigned = IID == Intrinsic::smul_with_overflow;
2150
2151 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
2152 TTI::CastContextHint CCH = TTI::CastContextHint::None;
2153
2154 InstructionCost Cost = 0;
2155 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
2156 Cost +=
2157 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2158 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
2159 CCH, CostKind);
2160 Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
2161 CostKind,
2162 {TTI::OK_AnyValue, TTI::OP_None},
2163 {TTI::OK_UniformConstantValue, TTI::OP_None});
2164
2165 if (IsSigned)
2166 Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
2167 CostKind,
2168 {TTI::OK_AnyValue, TTI::OP_None},
2169 {TTI::OK_UniformConstantValue, TTI::OP_None});
2170
2171 Cost += thisT()->getCmpSelInstrCost(
2172 BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
2173 return Cost;
2174 }
2175 case Intrinsic::fptosi_sat:
2176 case Intrinsic::fptoui_sat: {
2177 if (Tys.empty())
2178 break;
2179 Type *FromTy = Tys[0];
2180 bool IsSigned = IID == Intrinsic::fptosi_sat;
2181
2182 InstructionCost Cost = 0;
2183 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
2184 {FromTy, FromTy});
2185 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
2186 IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
2187 {FromTy, FromTy});
2188 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
2189 Cost += thisT()->getCastInstrCost(
2190 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
2191 TTI::CastContextHint::None, CostKind);
2192 if (IsSigned) {
2193 Type *CondTy = RetTy->getWithNewBitWidth(1);
2194 Cost += thisT()->getCmpSelInstrCost(
2195 BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2196 Cost += thisT()->getCmpSelInstrCost(
2197 BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
2198 }
2199 return Cost;
2200 }
2201 case Intrinsic::ctpop:
2202 ISD = ISD::CTPOP;
2203 // In case of legalization use TCC_Expensive. This is cheaper than a
2204 // library call but still not a cheap instruction.
2205 SingleCallCost = TargetTransformInfo::TCC_Expensive;
2206 break;
2207 case Intrinsic::ctlz:
2208 ISD = ISD::CTLZ;
2209 break;
2210 case Intrinsic::cttz:
2211 ISD = ISD::CTTZ;
2212 break;
2213 case Intrinsic::bswap:
2214 ISD = ISD::BSWAP;
2215 break;
2216 case Intrinsic::bitreverse:
2217 ISD = ISD::BITREVERSE;
2218 break;
2219 }
2220
2221 const TargetLoweringBase *TLI = getTLI();
2222 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
2223
2224 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
2225 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
2226 TLI->isFAbsFree(LT.second)) {
2227 return 0;
2228 }
2229
2230 // The operation is legal. Assume it costs 1.
2231 // If the type is split to multiple registers, assume that there is some
2232 // overhead to this.
2233 // TODO: Once we have extract/insert subvector cost we need to use them.
2234 if (LT.first > 1)
2235 return (LT.first * 2);
2236 else
2237 return (LT.first * 1);
2238 } else if (!TLI->isOperationExpand(ISD, LT.second)) {
2239 // If the operation is custom lowered then assume
2240 // that the code is twice as expensive.
2241 return (LT.first * 2);
2242 }
2243
2244 // If we can't lower fmuladd into an FMA estimate the cost as a floating
2245 // point mul followed by an add.
2246 if (IID == Intrinsic::fmuladd)
2247 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
2248 CostKind) +
2249 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
2250 CostKind);
2251 if (IID == Intrinsic::experimental_constrained_fmuladd) {
2252 IntrinsicCostAttributes FMulAttrs(
2253 Intrinsic::experimental_constrained_fmul, RetTy, Tys);
2254 IntrinsicCostAttributes FAddAttrs(
2255 Intrinsic::experimental_constrained_fadd, RetTy, Tys);
2256 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
2257 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
2258 }
2259
2260 // Else, assume that we need to scalarize this intrinsic. For math builtins
2261 // this will emit a costly libcall, adding call overhead and spills. Make it
2262 // very expensive.
2263 if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
2264 // Scalable vectors cannot be scalarized, so return Invalid.
2265 if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
2266 return isa<ScalableVectorType>(Ty);
2267 }))
2268 return InstructionCost::getInvalid();
2269
2270 InstructionCost ScalarizationCost =
2271 SkipScalarizationCost
2272 ? ScalarizationCostPassed
2273 : getScalarizationOverhead(RetVTy, /*Insert*/ true,
2274 /*Extract*/ false, CostKind);
2275
2276 unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
2277 SmallVector<Type *, 4> ScalarTys;
2278 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2279 Type *Ty = Tys[i];
2280 if (Ty->isVectorTy())
2281 Ty = Ty->getScalarType();
2282 ScalarTys.push_back(Ty);
2283 }
2284 IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
2285 InstructionCost ScalarCost =
2286 thisT()->getIntrinsicInstrCost(Attrs, CostKind);
2287 for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
2288 if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
2289 if (!ICA.skipScalarizationCost())
2290 ScalarizationCost += getScalarizationOverhead(
2291 VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
2292 ScalarCalls = std::max(ScalarCalls,
2293 cast<FixedVectorType>(VTy)->getNumElements());
2294 }
2295 }
2296 return ScalarCalls * ScalarCost + ScalarizationCost;
2297 }
2298
2299 // This is going to be turned into a library call, make it expensive.
2300 return SingleCallCost;
2301 }
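// Editorial usage sketch (illustrative only; `TTIImpl` and `Ctx` are assumed
// names for a concrete BasicTTIImplBase subclass and an LLVMContext):
//   auto *VecTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
//   IntrinsicCostAttributes Attrs(Intrinsic::fmuladd, VecTy,
//                                 {VecTy, VecTy, VecTy});
//   InstructionCost C = TTIImpl.getIntrinsicInstrCost(
//       Attrs, TargetTransformInfo::TCK_RecipThroughput);
// The argument-based getIntrinsicInstrCost earlier in this file falls back
// to this type-based overload when it cannot use actual call-site operands.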
2302
2303 /// Compute a cost of the given call instruction.
2304 ///
2305 /// Compute the cost of calling function F with return type RetTy and
2306 /// argument types Tys. F might be nullptr, in this case the cost of an
2307 /// arbitrary call with the specified signature will be returned.
2308 /// This is used, for instance, when we estimate call of a vector
2309 /// counterpart of the given function.
2310 /// \param F Called function, might be nullptr.
2311 /// \param RetTy Return value types.
2312 /// \param Tys Argument types.
2313 /// \returns The cost of Call instruction.
2314 InstructionCost getCallInstrCost(Function *F, Type *RetTy,
2315 ArrayRef<Type *> Tys,
2316 TTI::TargetCostKind CostKind) {
2317 return 10;
2318 }
2319
2320 unsigned getNumberOfParts(Type *Tp) {
2321 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
2322 return LT.first.isValid() ? *LT.first.getValue() : 0;
2323 }
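// Editorial example: on a hypothetical target whose widest legal vector
// register is 128 bits, getNumberOfParts(<8 x i64>) returns 4, because type
// legalization splits v8i64 into two v4i64 halves and then into four v2i64
// parts, doubling LT.first at each split.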
2324
2325 InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
2326 const SCEV *) {
2327 return 0;
2328 }
2329
2330 /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
2331 /// We're assuming that the reduction operation is performed in the following way:
2332 ///
2333 /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
2334 /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n-1, i32 undef, ..., i32 undef>
2335 /// \----------------v-------------/ \----------v------------/
2336 /// n/2 elements n/2 elements
2337 /// %red1 = op <n x t> %val, <n x t> %val1
2338 /// After this operation we have a vector %red1 where only the first n/2
2339 /// elements are meaningful, the second n/2 elements are undefined and can be
2340 /// dropped. All other operations are actually working with the vector of
2341 /// length n/2, not n, though the real vector length is still n.
2342 /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
2343 /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2-1, i32 undef, ..., i32 undef>
2344 /// \----------------v-------------/ \----------v------------/
2345 /// n/4 elements 3*n/4 elements
2346 /// %red2 = op <n x t> %red1, <n x t> %val2 - working with the vector of
2347 /// length n/2, the resulting vector has length n/4 etc.
2348 ///
2349 /// The cost model should take into account that the actual length of the
2350 /// vector is reduced on each iteration.
2351 InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
2352 TTI::TargetCostKind CostKind) {
2353 // Targets must implement a default value for the scalable case, since
2354 // we don't know how many lanes the vector has.
2355 if (isa<ScalableVectorType>(Ty))
2356 return InstructionCost::getInvalid();
2357
2358 Type *ScalarTy = Ty->getElementType();
2359 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2360 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
2361 ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
2362 NumVecElts >= 2) {
2363 // Or reduction for i1 is represented as:
2364 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2365 // %res = cmp ne iReduxWidth %val, 0
2366 // And reduction for i1 is represented as:
2367 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2368 // %res = cmp eq iReduxWidth %val, -1 (all ones)
2369 Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
2370 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
2371 TTI::CastContextHint::None, CostKind) +
2372 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
2373 CmpInst::makeCmpResultType(ValTy),
2374 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2375 }
2376 unsigned NumReduxLevels = Log2_32(NumVecElts);
2377 InstructionCost ArithCost = 0;
2378 InstructionCost ShuffleCost = 0;
2379 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2380 unsigned LongVectorCount = 0;
2381 unsigned MVTLen =
2382 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2383 while (NumVecElts > MVTLen) {
2384 NumVecElts /= 2;
2385 VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2386 ShuffleCost +=
2387 thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
2388 CostKind, NumVecElts, SubTy);
2389 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
2390 Ty = SubTy;
2391 ++LongVectorCount;
2392 }
2393
2394 NumReduxLevels -= LongVectorCount;
2395
2396 // The minimal length of the vector is limited by the real length of vector
2397 // operations performed on the current platform. That's why several final
2398 // reduction operations are performed on the vectors with the same
2399 // architecture-dependent length.
2400
2401 // By default reductions need one shuffle per reduction level.
2402 ShuffleCost +=
2403 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
2404 std::nullopt, CostKind, 0, Ty);
2405 ArithCost +=
2406 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
2407 return ShuffleCost + ArithCost +
2408 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2409 CostKind, 0, nullptr, nullptr);
2410 }
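// Editorial worked example for the tree reduction above: an add reduction of
// <8 x i32> on a hypothetical target whose widest legal vector is <4 x i32>
// splits once in the loop (one extract-subvector shuffle plus one <4 x i32>
// add), leaving NumReduxLevels = log2(8) - 1 = 2, so two more single-source
// shuffles and two more <4 x i32> adds are counted, plus the final
// extractelement of lane 0.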
2411
2412 /// Try to calculate the cost of performing strict (in-order) reductions,
2413 /// which involves doing a sequence of floating point additions in lane
2414 /// order, starting with an initial value. For example, consider a scalar
2415 /// initial value 'InitVal' of type float and a vector of type <4 x float>:
2416 ///
2417 /// Vector = <float %v0, float %v1, float %v2, float %v3>
2418 ///
2419 /// %add1 = %InitVal + %v0
2420 /// %add2 = %add1 + %v1
2421 /// %add3 = %add2 + %v2
2422 /// %add4 = %add3 + %v3
2423 ///
2424 /// As a simple estimate we can say the cost of such a reduction is 4 times
2425 /// the cost of a scalar FP addition. We can only estimate the costs for
2426 /// fixed-width vectors here because for scalable vectors we do not know the
2427 /// runtime number of operations.
2428 InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
2429 TTI::TargetCostKind CostKind) {
2430 // Targets must implement a default value for the scalable case, since
2431 // we don't know how many lanes the vector has.
2432 if (isa<ScalableVectorType>(Ty))
2433 return InstructionCost::getInvalid();
2434
2435 auto *VTy = cast<FixedVectorType>(Ty);
2436 InstructionCost ExtractCost = getScalarizationOverhead(
2437 VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
2438 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
2439 Opcode, VTy->getElementType(), CostKind);
2440 ArithCost *= VTy->getNumElements();
2441
2442 return ExtractCost + ArithCost;
2443 }
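// Editorial example: for an in-order fadd reduction of <4 x float>, the
// estimate above is the cost of extracting the four lanes plus four scalar
// fadd instructions; the running scalar accumulator makes this a strict
// chain that cannot be reassociated into a tree.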
2444
2445 InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2446 std::optional<FastMathFlags> FMF,
2447 TTI::TargetCostKind CostKind) {
2448 assert(Ty && "Unknown reduction vector type");
2449 if (TTI::requiresOrderedReduction(FMF))
2450 return getOrderedReductionCost(Opcode, Ty, CostKind);
2451 return getTreeReductionCost(Opcode, Ty, CostKind);
2452 }
2453
2454 /// Try to calculate op costs for min/max reduction operations.
2455 /// \param CondTy Conditional type for the Select instruction.
2456 InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
2457 FastMathFlags FMF,
2458 TTI::TargetCostKind CostKind) {
2459 // Targets must implement a default value for the scalable case, since
2460 // we don't know how many lanes the vector has.
2461 if (isa<ScalableVectorType>(Ty))
2462 return InstructionCost::getInvalid();
2463
2464 Type *ScalarTy = Ty->getElementType();
2465 unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
2466 unsigned NumReduxLevels = Log2_32(NumVecElts);
2467 InstructionCost MinMaxCost = 0;
2468 InstructionCost ShuffleCost = 0;
2469 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
2470 unsigned LongVectorCount = 0;
2471 unsigned MVTLen =
2472 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
2473 while (NumVecElts > MVTLen) {
2474 NumVecElts /= 2;
2475 auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
2476
2477 ShuffleCost +=
2478 thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
2479 CostKind, NumVecElts, SubTy);
2480
2481 IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
2482 MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
2483 Ty = SubTy;
2484 ++LongVectorCount;
2485 }
2486
2487 NumReduxLevels -= LongVectorCount;
2488
2489 // The minimal length of the vector is limited by the real length of vector
2490 // operations performed on the current platform. That's why several final
2491 // reduction operations are performed on the vectors with the same
2492 // architecture-dependent length.
2493 ShuffleCost +=
2494 NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
2495 std::nullopt, CostKind, 0, Ty);
2496 IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
2497 MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
2498 // The last min/max should be in vector registers and we counted it above.
2499 // So just need a single extractelement.
2500 return ShuffleCost + MinMaxCost +
2501 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
2502 CostKind, 0, nullptr, nullptr);
2503 }
2504
2505 InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
2506 Type *ResTy, VectorType *Ty,
2507 FastMathFlags FMF,
2508 TTI::TargetCostKind CostKind) {
2509 // Without any native support, this is equivalent to the cost of
2510 // vecreduce.opcode(ext(Ty A)).
2511 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2512 InstructionCost RedCost =
2513 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
2514 InstructionCost ExtCost = thisT()->getCastInstrCost(
2515 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2516 TTI::CastContextHint::None, CostKind);
2517
2518 return RedCost + ExtCost;
2519 }
2520
2521 InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
2522 VectorType *Ty,
2523 TTI::TargetCostKind CostKind) {
2524 // Without any native support, this is equivalent to the cost of
2525 // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
2526 // vecreduce.add(mul(A, B)).
2527 VectorType *ExtTy = VectorType::get(ResTy, Ty);
2528 InstructionCost RedCost = thisT()->getArithmeticReductionCost(
2529 Instruction::Add, ExtTy, std::nullopt, CostKind);
2530 InstructionCost ExtCost = thisT()->getCastInstrCost(
2531 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
2532 TTI::CastContextHint::None, CostKind);
2533
2534 InstructionCost MulCost =
2535 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
2536
2537 return RedCost + MulCost + 2 * ExtCost;
2538 }
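// Editorial example: a dot-product style pattern such as
//   vecreduce.add(mul(zext <16 x i8> %a to <16 x i32>,
//                     zext <16 x i8> %b to <16 x i32>))
// is estimated above as one <16 x i32> add reduction plus one <16 x i32> mul
// plus two zext casts, unless a target overrides this hook with a native
// multiply-accumulate reduction cost.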
2539
2540 InstructionCost getVectorSplitCost() { return 1; }
2541
2542 /// @}
2543};
2544
2545/// Concrete BasicTTIImpl that can be used if no further customization
2546/// is needed.
2547class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
2548 using BaseT = BasicTTIImplBase<BasicTTIImpl>;
2549
2550 friend class BasicTTIImplBase<BasicTTIImpl>;
2551
2552 const TargetSubtargetInfo *ST;
2553 const TargetLoweringBase *TLI;
2554
2555 const TargetSubtargetInfo *getST() const { return ST; }
2556 const TargetLoweringBase *getTLI() const { return TLI; }
2557
2558public:
2559 explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
2560};
2561
2562} // end namespace llvm
2563
2564#endif // LLVM_CODEGEN_BASICTTIIMPL_H
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
return RetTy
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
This pass exposes codegen information to IR-level passes.
static constexpr uint32_t Opcode
Definition: aarch32.h:200
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1302
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1173
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1433
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1102
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
an instruction to allocate memory on the stack
Definition: Instructions.h:58
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:210
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
Base class which can be used to help build a TTI implementation.
Definition: BasicTTIImpl.h:79
bool isTypeLegal(Type *Ty)
Definition: BasicTTIImpl.h:421
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on arguments.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:285
virtual unsigned getPrefetchDistance() const
Definition: BasicTTIImpl.h:696
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
Definition: BasicTTIImpl.h:398
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE)
Definition: BasicTTIImpl.h:556
unsigned getMaxInterleaveFactor(ElementCount VF)
Definition: BasicTTIImpl.h:863
unsigned getNumberOfParts(Type *Tp)
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const
Definition: BasicTTIImpl.h:725
std::optional< unsigned > getVScaleForTuning() const
Definition: BasicTTIImpl.h:730
InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...
bool isTruncateFree(Type *Ty1, Type *Ty2)
Definition: BasicTTIImpl.h:411
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1)
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
Definition: BasicTTIImpl.h:636
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
Definition: BasicTTIImpl.h:865
InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind)
Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI)
Definition: BasicTTIImpl.h:643
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
Definition: BasicTTIImpl.h:716
bool isLegalICmpImmediate(int64_t imm)
Definition: BasicTTIImpl.h:330
bool isProfitableToHoist(Instruction *I)
Definition: BasicTTIImpl.h:415
virtual unsigned getMaxPrefetchIterationsAhead() const
Definition: BasicTTIImpl.h:708
InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index)
std::optional< unsigned > getMaxVScale() const
Definition: BasicTTIImpl.h:729
TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const
Definition: BasicTTIImpl.h:943
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
unsigned getRegUsageForType(Type *Ty)
Definition: BasicTTIImpl.h:426
bool shouldBuildRelLookupTables() const
Definition: BasicTTIImpl.h:502
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind)
Try to calculate op costs for min/max reduction operations.
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
Definition: BasicTTIImpl.h:550
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr)
InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt)
Definition: BasicTTIImpl.h:991
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Definition: BasicTTIImpl.h:437
bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:369
bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2)
Definition: BasicTTIImpl.h:381
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
Definition: BasicTTIImpl.h:658
bool shouldFoldTerminatingConditionAfterLSR() const
Definition: BasicTTIImpl.h:389
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Definition: BasicTTIImpl.h:700
bool hasBranchDivergence(const Function *F=nullptr)
Definition: BasicTTIImpl.h:279
bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
Definition: BasicTTIImpl.h:375
unsigned getAssumedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:307
InstructionCost getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instructions unique non-constant operands.
Definition: BasicTTIImpl.h:782
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *, const SCEV *)
InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing an instruction.
Definition: BasicTTIImpl.h:736
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)
Definition: BasicTTIImpl.h:345
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
Definition: BasicTTIImpl.h:431
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty)
Definition: BasicTTIImpl.h:534
virtual std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:676
bool isAlwaysUniform(const Value *V)
Definition: BasicTTIImpl.h:283
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr)
Definition: BasicTTIImpl.h:334
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true)
Definition: BasicTTIImpl.h:648
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const
Definition: BasicTTIImpl.h:271
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const
Definition: BasicTTIImpl.h:349
InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind)
Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
Definition: BasicTTIImpl.h:766
virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
Definition: BasicTTIImpl.h:682
virtual bool enableWritePrefetching() const
Definition: BasicTTIImpl.h:712
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Definition: BasicTTIImpl.h:321
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP)
Definition: BasicTTIImpl.h:628
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind)
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Definition: BasicTTIImpl.h:298
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Compute a cost of the given call instruction.
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind)
InstructionCost getFPOpCost(Type *Ty)
Definition: BasicTTIImpl.h:538
InstructionCost getVectorSplitCost()
std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
Definition: BasicTTIImpl.h:829
bool haveFastSqrt(Type *Ty)
Definition: BasicTTIImpl.h:527
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
Definition: BasicTTIImpl.h:317
unsigned getInliningThresholdMultiplier() const
Definition: BasicTTIImpl.h:548
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind)
virtual ~BasicTTIImplBase()=default
InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind)
Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...
Definition: BasicTTIImpl.h:811
bool isVScaleKnownToBeAPowerOfTwo() const
Definition: BasicTTIImpl.h:731
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II)
Definition: BasicTTIImpl.h:652
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Definition: BasicTTIImpl.h:289
bool isLegalAddImmediate(int64_t imm)
Definition: BasicTTIImpl.h:326
unsigned getFlatAddressSpace()
Definition: BasicTTIImpl.h:293
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
virtual unsigned getCacheLineSize() const
Definition: BasicTTIImpl.h:692
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: BasicTTIImpl.h:303
bool isSourceOfDivergence(const Value *V)
Definition: BasicTTIImpl.h:281
int getInlinerVectorBonusPercent() const
Definition: BasicTTIImpl.h:554
InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
Get intrinsic cost based on argument types.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Definition: BasicTTIImpl.h:665
bool isSingleThreaded() const
Definition: BasicTTIImpl.h:311
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
Definition: BasicTTIImpl.h:262
unsigned adjustInliningThreshold(const CallBase *CB)
Definition: BasicTTIImpl.h:549
bool isProfitableLSRChainElement(Instruction *I)
Definition: BasicTTIImpl.h:394
Concrete BasicTTIImpl that can be used if no further customization is needed.
size_type count() const
count - Returns the number of bits which are set.
Definition: BitVector.h:162
BitVector & set()
Definition: BitVector.h:351
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1259
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1127
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:780
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:803
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:807
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:805
@ ICMP_EQ
equal
Definition: InstrTypes.h:801
@ ICMP_NE
not equal
Definition: InstrTypes.h:802
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:790
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
TypeSize getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
Definition: DataLayout.h:484
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:420
constexpr bool isVector() const
One or more elements.
Definition: TypeSize.h:311
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:307
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
bool isTargetIntrinsic() const
isTargetIntrinsic - Returns true if this function is an intrinsic and the intrinsic is specific to a ...
Definition: Function.cpp:870
The core instruction combiner logic.
Definition: InstCombiner.h:47
static InstructionCost getInvalid(CostType Val=0)
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
const SmallVectorImpl< Type * > & getArgTypes() const
const SmallVectorImpl< const Value * > & getArgs() const
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
virtual bool shouldPrefetchAddressSpace(unsigned AS) const
virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Return the minimum stride necessary to trigger software prefetching.
virtual bool enableWritePrefetching() const
virtual unsigned getMaxPrefetchIterationsAhead() const
Return the maximum prefetch distance in terms of loop iterations.
virtual unsigned getPrefetchDistance() const
Return the preferred prefetch distance in terms of instructions.
virtual std::optional< unsigned > getCacheAssociativity(unsigned Level) const
Return the cache associatvity for the given level of cache.
virtual std::optional< unsigned > getCacheLineSize(unsigned Level) const
Return the target cache line size in bytes at a given level.
Machine Value Type.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Analysis providing profile information.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
size_type size() const
Definition: SmallPtrSet.h:93
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:513
Multiway switch.
Provides information about what library functions are available for the current target.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool isProfitableToHoist(Instruction *I) const
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the prefered common base offset.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
virtual std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
If the specified predicate checks whether a generic pointer falls within a specified address space,...
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
virtual unsigned getAssumedAddrSpace(const Value *V) const
If the specified generic pointer could be assumed as a pointer to a specific address space,...
TargetOptions Options
ThreadModel::Model ThreadModel
ThreadModel - This flag specifies the type of threading model to assume for things like atomics.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual bool useAA() const
Enable use of alias analysis during code generation (during MI scheduling, DAGCombine,...
const DataLayout & getDataLayout() const
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const
bool isProfitableLSRChainElement(Instruction *I) const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const
std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I) const
bool isLoweredToCall(const Function *F) const
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
static OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
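The same operation can be priced under different cost kinds; a minimal sketch comparing the throughput and code-size cost of an i32 add (the helper name is illustrative):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"

// Sketch: reciprocal-throughput vs. code-size cost of an i32 add.
static void costOfAdd(const llvm::TargetTransformInfo &TTI,
                      llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::InstructionCost Thru = TTI.getArithmeticInstrCost(
      llvm::Instruction::Add, I32,
      llvm::TargetTransformInfo::TCK_RecipThroughput);
  llvm::InstructionCost Size = TTI.getArithmeticInstrCost(
      llvm::Instruction::Add, I32, llvm::TargetTransformInfo::TCK_CodeSize);
  (void)Thru; (void)Size;
}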
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
@ TCC_Expensive
The cost of a 'div' instruction on x86.
@ TCC_Basic
The cost of a typical 'add' instruction.
MemIndexedMode
The type of load/store indexing.
@ MIM_PostInc
Post-incrementing.
@ MIM_PostDec
Post-decrementing.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector. Index indicates start offset.
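A minimal sketch of pricing two of the shuffle kinds above on a <4 x float>; the helper name is an illustrative assumption:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"

// Sketch: cost of broadcasting and of reversing a <4 x float>.
static void shuffleCosts(const llvm::TargetTransformInfo &TTI,
                         llvm::LLVMContext &Ctx) {
  auto *VTy = llvm::FixedVectorType::get(llvm::Type::getFloatTy(Ctx), 4);
  llvm::InstructionCost Bcast =
      TTI.getShuffleCost(llvm::TargetTransformInfo::SK_Broadcast, VTy);
  llvm::InstructionCost Rev =
      TTI.getShuffleCost(llvm::TargetTransformInfo::SK_Reverse, VTy);
  (void)Bcast; (void)Rev;
}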
CastContextHint
Represents a hint about the context in which a cast is used.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
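A minimal sketch of the cast-context distinction above: the same i8-to-i32 zext is priced once as a standalone cast and once as a cast folded into a load (names are illustrative):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"

// Sketch: an i8 -> i32 zext costed with and without a load context.
static void castCosts(const llvm::TargetTransformInfo &TTI,
                      llvm::LLVMContext &Ctx) {
  using TTIRef = llvm::TargetTransformInfo;
  llvm::Type *I8 = llvm::Type::getInt8Ty(Ctx);
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::InstructionCost Plain = TTI.getCastInstrCost(
      llvm::Instruction::ZExt, I32, I8, TTIRef::CastContextHint::None,
      TTIRef::TCK_RecipThroughput);
  llvm::InstructionCost Folded = TTI.getCastInstrCost(
      llvm::Instruction::ZExt, I32, I8, TTIRef::CastContextHint::Normal,
      TTIRef::TCK_RecipThroughput);
  (void)Plain; (void)Folded;
}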
CacheLevel
The possible cache levels.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:357
bool isArch64Bit() const
Test whether the architecture is 64-bit.
Definition: Triple.cpp:1469
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, or DriverKit).
Definition: Triple.h:517
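A minimal sketch of the Triple queries listed above; the triple string is an arbitrary example:

#include "llvm/ADT/StringRef.h"
#include "llvm/TargetParser/Triple.h"

// Sketch: classify an example target triple.
static bool isDarwin64(llvm::StringRef TT = "arm64-apple-macosx14.0") {
  llvm::Triple T(TT);
  return T.isOSDarwin() && T.isArch64Bit() &&
         T.getArch() == llvm::Triple::aarch64;
}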
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:332
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
bool isFPOrFPVectorTy() const
Return true if this is an FP type or a vector of FP types.
Definition: Type.h:216
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
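A minimal sketch exercising several of the Type queries above on a <4 x i16>; the helper name and the concrete type are illustrative:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

// Sketch: inspect <4 x i16> and widen its lanes to i32.
static void inspectType(llvm::LLVMContext &Ctx) {
  llvm::Type *Ty = llvm::FixedVectorType::get(llvm::Type::getInt16Ty(Ctx), 4);
  bool IsIntVec = Ty->isVectorTy() && Ty->isIntOrIntVectorTy(); // true
  unsigned Bits = Ty->getScalarSizeInBits();                    // 16
  llvm::Type *Wide = Ty->getWithNewBitWidth(32);                // <4 x i32>
  llvm::Type *Elt = Ty->getScalarType();                        // i16
  (void)IsIntVec; (void)Bits; (void)Wide; (void)Elt;
}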
static bool isVPBinOp(Intrinsic::ID ID)
static std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)
static bool isVPIntrinsic(Intrinsic::ID)
static bool isVPReduction(Intrinsic::ID ID)
LLVM Value Representation.
Definition: Value.h:74
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
static VectorType * getHalfElementsVectorType(VectorType *VTy)
This static method returns a VectorType with half as many elements as the input type and the same ele...
Definition: DerivedTypes.h:507
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Definition: Type.cpp:676
Type * getElementType() const
Definition: DerivedTypes.h:436
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
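A minimal sketch of the VectorType and ElementCount helpers above, building a scalable vector and halving its element count; the names are illustrative:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

// Sketch: build <vscale x 8 x i16>, then derive <vscale x 4 x i16>.
static void vectorTypes(llvm::LLVMContext &Ctx) {
  auto *VTy = llvm::VectorType::get(llvm::Type::getInt16Ty(Ctx),
                                    llvm::ElementCount::getScalable(8));
  llvm::ElementCount EC = VTy->getElementCount();
  bool Scalable = EC.isScalable();          // true
  unsigned MinElts = EC.getKnownMinValue(); // 8
  auto *Half = llvm::VectorType::getHalfElementsVectorType(VTy);
  (void)Scalable; (void)MinElts; (void)Half;
}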
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
Definition: APInt.cpp:2986
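A minimal sketch of ScaleBitMask; the concrete widths are arbitrary, and the call is written assuming the function is reached through the llvm::APIntOps namespace in APInt.h:

#include "llvm/ADT/APInt.h"

// Sketch: widen a 4-bit demanded-lanes mask 0b0101 to an 8-bit mask by
// splatting each bit across two neighboring bits -> 0b00110011.
static llvm::APInt widenMask() {
  llvm::APInt Narrow(4, 0b0101);
  return llvm::APIntOps::ScaleBitMask(Narrow, 8);
}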
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:714
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:483
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1050
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1054
@ FCANONICALIZE
Returns the platform-specific canonical encoding of a floating-point number.
Definition: ISDOpcodes.h:500
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:969
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:736
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:982
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:493
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1455
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:417
AddressSpace
Definition: NVPTXBaseInfo.h:21
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1733
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
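A minimal sketch of the small math and range helpers referenced above; the values are arbitrary:

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>
#include <vector>

// Sketch: a few of the free functions this header relies on.
static void mathHelpers() {
  uint64_t Groups = llvm::divideCeil(10, 4); // 3
  unsigned FloorLog = llvm::Log2_32(40);     // 5
  bool Pow2 = llvm::isPowerOf2_32(64);       // true
  std::vector<int> V = {1, -2, 3};
  bool HasNeg = llvm::any_of(V, [](int X) { return X < 0; }); // true
  (void)Groups; (void)FloorLog; (void)Pow2; (void)HasNeg;
}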
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
InstructionCost Cost
cl::opt< unsigned > PartialUnrollingThreshold
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:129
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:624
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:299
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
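A minimal sketch of the EVT helpers above, mapping an IR type to a value type; the helper name and the chosen types are illustrative:

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

// Sketch: map <4 x i32> to an EVT and query it.
static void valueTypes(llvm::LLVMContext &Ctx) {
  llvm::Type *Ty = llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 4);
  llvm::EVT VT = llvm::EVT::getEVT(Ty); // v4i32
  if (VT.isSimple()) {
    llvm::MVT M = VT.getSimpleVT();     // MVT::v4i32
    (void)M;
  }
  llvm::EVT I48 = llvm::EVT::getIntegerVT(Ctx, 48); // an extended, non-simple EVT
  (void)I48;
}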
Attributes of a target dependent hardware loop.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
bool AllowPeeling
Allow peeling off loop iterations.
bool AllowLoopNestsPeeling
Allow peeling off loop iterations for loop nests.
bool PeelProfiledIterations
Allow peeling based on profile.
unsigned PeelCount
A forced peeling factor (the number of bodies of the original loop that should be peeled off before t...
Parameters that control the generic loop unrolling transformation.
bool UpperBound
Allow using trip count upper bound to unroll loops.
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
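As a rough, purely illustrative sketch of how target code might fill in these unrolling and peeling knobs (the helper below is hypothetical and not a hook defined in this file; the chosen values are arbitrary):

#include "llvm/Analysis/TargetTransformInfo.h"

// Hypothetical sketch: bias generic defaults towards modest runtime
// unrolling and profile-guided peeling.
static void tunePreferences(
    llvm::TargetTransformInfo::UnrollingPreferences &UP,
    llvm::TargetTransformInfo::PeelingPreferences &PP) {
  UP.Partial = true;              // allow partial unrolling
  UP.Runtime = true;              // allow runtime trip-count expansion
  UP.PartialThreshold = 60;       // arbitrary size budget
  UP.PartialOptSizeThreshold = 0; // but not when optimizing for size
  UP.UpperBound = true;           // may unroll using the trip-count upper bound

  PP.AllowPeeling = true;
  PP.PeelProfiledIterations = true; // peel based on profile data
  PP.PeelCount = 0;                 // no forced peel count
}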