LLVM 23.0.0git
TargetTransformInfo.h
Go to the documentation of this file.
1//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This pass exposes codegen information to IR-level passes. Every
10/// transformation that uses codegen information is broken into three parts:
11/// 1. The IR-level analysis pass.
12/// 2. The IR-level transformation interface which provides the needed
13/// information.
14/// 3. Codegen-level implementation which uses target-specific hooks.
15///
16/// This file defines #2, which is the interface that IR-level transformations
17/// use for querying the codegen.
18///
19//===----------------------------------------------------------------------===//
20
21#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
22#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
23
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/ArrayRef.h"
27#include "llvm/ADT/Uniformity.h"
30#include "llvm/IR/FMF.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/PassManager.h"
33#include "llvm/Pass.h"
38#include <functional>
39#include <optional>
40#include <utility>
41
42namespace llvm {
43
44namespace Intrinsic {
45typedef unsigned ID;
46}
47
48class AllocaInst;
49class AssumptionCache;
51class DominatorTree;
52class CondBrInst;
53class Function;
54class GlobalValue;
55class InstCombiner;
58class IntrinsicInst;
59class LoadInst;
60class Loop;
61class LoopInfo;
65class SCEV;
66class ScalarEvolution;
67class SmallBitVector;
68class StoreInst;
69class SwitchInst;
71class Type;
72class VPIntrinsic;
73struct KnownBits;
74
75/// Information about a load/store intrinsic defined by the target.
77 /// This is the pointer that the intrinsic is loading from or storing to.
78 /// If this is non-null, then analysis/optimization passes can assume that
79 /// this intrinsic is functionally equivalent to a load/store from this
80 /// pointer.
81 Value *PtrVal = nullptr;
82
83 // Ordering for atomic operations.
85
86 // Same Id is set by the target for corresponding load/store intrinsics.
87 unsigned short MatchingId = 0;
88
89 bool ReadMem = false;
90 bool WriteMem = false;
91 bool IsVolatile = false;
92
94
100};
101
102/// Attributes of a target dependent hardware loop.
106 Loop *L = nullptr;
109 const SCEV *ExitCount = nullptr;
111 Value *LoopDecrement = nullptr; // Decrement the loop counter by this
112 // value in every iteration.
113 bool IsNestingLegal = false; // Can a hardware loop be a parent to
114 // another hardware loop?
115 bool CounterInReg = false; // Should loop counter be updated in
116 // the loop via a phi?
117 bool PerformEntryTest = false; // Generate the intrinsic which also performs
118 // icmp ne zero on the loop counter value and
119 // produces an i1 to guard the loop entry.
121 DominatorTree &DT,
122 bool ForceNestedLoop = false,
123 bool ForceHardwareLoopPHI = false);
124 LLVM_ABI bool canAnalyze(LoopInfo &LI);
125};
126
127/// Information for memory intrinsic cost model.
129 /// Optional context instruction, if one exists, e.g. the
130 /// load/store to transform to the intrinsic.
131 const Instruction *I = nullptr;
132
133 /// Address in memory.
134 const Value *Ptr = nullptr;
135
136 /// Vector type of the data to be loaded or stored.
137 Type *DataTy = nullptr;
138
139 /// ID of the memory intrinsic.
140 Intrinsic::ID IID;
141
142 /// True when the memory access is predicated with a mask
143 /// that is not a compile-time constant.
144 bool VariableMask = true;
145
146 /// Address space of the pointer.
147 unsigned AddressSpace = 0;
148
149 /// Alignment of single element.
150 Align Alignment;
151
152public:
154 const Value *Ptr, bool VariableMask,
155 Align Alignment,
156 const Instruction *I = nullptr)
157 : I(I), Ptr(Ptr), DataTy(DataTy), IID(Id), VariableMask(VariableMask),
158 Alignment(Alignment) {}
159
161 Align Alignment,
162 unsigned AddressSpace = 0)
163 : DataTy(DataTy), IID(Id), AddressSpace(AddressSpace),
164 Alignment(Alignment) {}
165
167 bool VariableMask, Align Alignment,
168 const Instruction *I = nullptr)
169 : I(I), DataTy(DataTy), IID(Id), VariableMask(VariableMask),
170 Alignment(Alignment) {}
171
/// Accessors for the memory-intrinsic cost attributes recorded at
/// construction time.
172 Intrinsic::ID getID() const { return IID; }
/// Optional context instruction; may be null.
173 const Instruction *getInst() const { return I; }
/// Address operand; may be null for the type-only constructor form.
174 const Value *getPointer() const { return Ptr; }
/// Vector type of the data being loaded or stored.
175 Type *getDataType() const { return DataTy; }
/// True when the access is predicated with a mask that is not a
/// compile-time constant.
176 bool getVariableMask() const { return VariableMask; }
177 unsigned getAddressSpace() const { return AddressSpace; }
/// Alignment of a single element of the access.
178 Align getAlignment() const { return Alignment; }
179};
180
182 const IntrinsicInst *II = nullptr;
183 Type *RetTy = nullptr;
184 Intrinsic::ID IID;
185 SmallVector<Type *, 4> ParamTys;
187 FastMathFlags FMF;
188 // If ScalarizationCost is UINT_MAX, the cost of scalarizing the
189 // arguments and the return value will be computed based on types.
190 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
191
192public:
194 Intrinsic::ID Id, const CallBase &CI,
196 bool TypeBasedOnly = false);
197
199 Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
200 FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
202
205
209 const IntrinsicInst *I = nullptr,
211
/// Accessors for the intrinsic cost attributes recorded at construction.
212 Intrinsic::ID getID() const { return IID; }
/// The underlying intrinsic call, if one was provided; may be null.
213 const IntrinsicInst *getInst() const { return II; }
214 Type *getReturnType() const { return RetTy; }
215 FastMathFlags getFlags() const { return FMF; }
216 InstructionCost getScalarizationCost() const { return ScalarizationCost; }
217 const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
218 const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }
219
/// True when no concrete argument values were recorded, i.e. the
/// attributes were built from types only and costing must be type-based.
220 bool isTypeBasedOnly() const {
221 return Arguments.empty();
222 }
223
/// True when a precomputed scalarization cost is available and the
/// type-based recomputation (see the note on ScalarizationCost above)
/// can be skipped.
224 bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
225};
226
228 /// Don't use tail folding
230 /// Use predicate only to mask operations on data in the loop.
231 /// When the VL is not known to be a power-of-2, this method requires a
232 /// runtime overflow check for the i + VL in the loop because it compares the
233 /// scalar induction variable against the tripcount rounded up by VL which may
234 /// overflow. When the VL is a power-of-2, both the increment and uprounded
235 /// tripcount will overflow to 0, which does not require a runtime check
236 /// since the loop is exited when the loop induction variable equals the
237 /// uprounded trip-count, which are both 0.
239 /// Same as Data, but avoids using the get.active.lane.mask intrinsic to
240 /// calculate the mask and instead implements this with a
241 /// splat/stepvector/cmp.
242 /// FIXME: Can this kind be removed now that SelectionDAGBuilder expands the
243 /// active.lane.mask intrinsic when it is not natively supported?
245 /// Use predicate to control both data and control flow.
246 /// This method always requires a runtime overflow check for the i + VL
247 /// increment inside the loop, because it uses the result directly in the
248 /// active.lane.mask to calculate the mask for the next iteration. If the
249 /// increment overflows, the mask is no longer correct.
251 /// Use predicated EVL instructions for tail-folding.
252 /// Indicates that VP intrinsics should be used.
254};
255
264
265class TargetTransformInfo;
268
269/// This pass provides access to the codegen interfaces that are needed
270/// for IR-level transformations.
272public:
279
280 /// Get the kind of extension that an instruction represents.
283 /// Get the kind of extension that a cast opcode represents.
286 /// Get the cast opcode for an extension kind.
289
290 /// Construct a TTI object using a type implementing the \c Concept
291 /// API below.
292 ///
293 /// This is used by targets to construct a TTI wrapping their target-specific
294 /// implementation that encodes appropriate costs for their target.
296 std::unique_ptr<const TargetTransformInfoImplBase> Impl);
297
298 /// Construct a baseline TTI object using a minimal implementation of
299 /// the \c Concept API below.
300 ///
301 /// The TTI implementation will reflect the information in the DataLayout
302 /// provided if non-null.
303 LLVM_ABI explicit TargetTransformInfo(const DataLayout &DL);
304
305 // Provide move semantics.
308
309 // We need to define the destructor out-of-line to define our sub-classes
310 // out-of-line.
312
313 /// Handle the invalidation of this information.
314 ///
315 /// When used as a result of \c TargetIRAnalysis this method will be called
316 /// when the function this was computed for changes. When it returns false,
317 /// the information is preserved across those changes.
319 FunctionAnalysisManager::Invalidator &) {
320 // FIXME: We should probably in some way ensure that the subtarget
321 // information for a function hasn't changed.
322 return false;
323 }
324
325 /// \name Generic Target Information
326 /// @{
327
328 /// The kind of cost model.
329 ///
330 /// There are several different cost models that can be customized by the
331 /// target. The normalization of each cost model may be target specific.
332 /// e.g. TCK_SizeAndLatency should be comparable to target thresholds such as
333 /// those derived from MCSchedModel::LoopMicroOpBufferSize etc.
335 TCK_RecipThroughput, ///< Reciprocal throughput.
336 TCK_Latency, ///< The latency of instruction.
337 TCK_CodeSize, ///< Instruction code size.
338 TCK_SizeAndLatency ///< The weighted sum of size and latency.
339 };
340
341 /// Underlying constants for 'cost' values in this interface.
342 ///
343 /// Many APIs in this interface return a cost. This enum defines the
344 /// fundamental values that should be used to interpret (and produce) those
345 /// costs. The costs are returned as an int rather than a member of this
346 /// enumeration because it is expected that the cost of one IR instruction
347 /// may have a multiplicative factor to it or otherwise won't fit directly
348 /// into the enum. Moreover, it is common to sum or average costs which works
349 /// better as simple integral values. Thus this enum only provides constants.
350 /// Also note that the returned costs are signed integers to make it natural
351 /// to add, subtract, and test with zero (a common boundary condition). It is
352 /// not expected that 2^32 is a realistic cost to be modeling at any point.
353 ///
354 /// Note that these costs should usually reflect the intersection of code-size
355 /// cost and execution cost. A free instruction is typically one that folds
356 /// into another instruction. For example, reg-to-reg moves can often be
357 /// skipped by renaming the registers in the CPU, but they still are encoded
358 /// and thus wouldn't be considered 'free' here.
360 TCC_Free = 0, ///< Expected to fold away in lowering.
361 TCC_Basic = 1, ///< The cost of a typical 'add' instruction.
362 TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
363 };
364
365 /// Estimate the cost of a GEP operation when lowered.
366 ///
367 /// \p PointeeType is the source element type of the GEP.
368 /// \p Ptr is the base pointer operand.
369 /// \p Operands is the list of indices following the base pointer.
370 ///
371 /// \p AccessType is a hint as to what type of memory might be accessed by
372 /// users of the GEP. getGEPCost will use it to determine if the GEP can be
373 /// folded into the addressing mode of a load/store. If AccessType is null,
374 /// then the resulting target type based off of PointeeType will be used as an
375 /// approximation.
377 getGEPCost(Type *PointeeType, const Value *Ptr,
378 ArrayRef<const Value *> Operands, Type *AccessType = nullptr,
379 TargetCostKind CostKind = TCK_SizeAndLatency) const;
380
381 /// Describe known properties for a set of pointers.
383 /// All the GEPs in a set have same base address.
384 unsigned IsSameBaseAddress : 1;
385 /// These properties only valid if SameBaseAddress is set.
386 /// True if all pointers are separated by a unit stride.
387 unsigned IsUnitStride : 1;
389 /// True if distance between any two neighbouring pointers is a known value.
389 unsigned IsKnownStride : 1;
390 unsigned Reserved : 29;
391
392 bool isSameBase() const { return IsSameBaseAddress; }
393 bool isUnitStride() const { return IsSameBaseAddress && IsUnitStride; }
395
397 return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/1,
398 /*IsKnownStride=*/1, 0};
399 }
401 return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
402 /*IsKnownStride=*/1, 0};
403 }
405 return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
406 /*IsKnownStride=*/0, 0};
407 }
408 };
409 static_assert(sizeof(PointersChainInfo) == 4, "Was size increase justified?");
410
411 /// Estimate the cost of a chain of pointers (typically pointer operands of a
412 /// chain of loads or stores within same block) operations set when lowered.
413 /// \p AccessTy is the type of the loads/stores that will ultimately use the
414 /// \p Ptrs.
417 const PointersChainInfo &Info, Type *AccessTy,
418 TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
419
420 /// \returns A value by which our inlining threshold should be multiplied.
421 /// This is primarily used to bump up the inlining threshold wholesale on
422 /// targets where calls are unusually expensive.
423 ///
424 /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
425 /// individual classes of instructions would be better.
427
430
431 /// \returns The bonus of inlining the last call to a static function.
433
434 /// \returns A value to be added to the inlining threshold.
435 LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const;
436
437 /// \returns The cost of having an Alloca in the caller if not inlined, to be
438 /// added to the threshold
439 LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB,
440 const AllocaInst *AI) const;
441
442 /// \returns Vector bonus in percent.
443 ///
444 /// Vector bonuses: We want to more aggressively inline vector-dense kernels
445 /// and apply this bonus based on the percentage of vector instructions. A
446 /// bonus is applied if the vector instructions exceed 50% and half that
447 /// amount is applied if it exceeds 10%. Note that these bonuses are somewhat
448 /// arbitrary and evolved over time by accident as much as because they are
449 /// principled bonuses.
450 /// FIXME: It would be nice to base the bonus values on something more
451 /// scientific. A target may have no bonus on vector instructions.
453
454 /// \return the expected cost of a memcpy, which could e.g. depend on the
455 /// source/destination type and alignment and the number of bytes copied.
457
458 /// Returns the maximum memset / memcpy size in bytes that still makes it
459 /// profitable to inline the call.
461
462 /// \return The estimated number of case clusters when lowering \p 'SI'.
463 /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
464 /// table.
465 LLVM_ABI unsigned
466 getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
468 BlockFrequencyInfo *BFI) const;
469
470 /// Estimate the cost of a given IR user when lowered.
471 ///
472 /// This can estimate the cost of either a ConstantExpr or Instruction when
473 /// lowered.
474 ///
475 /// \p Operands is a list of operands which can be a result of transformations
476 /// of the current operands. The number of the operands on the list must equal
477 /// to the number of the current operands the IR user has. Their order on the
478 /// list must be the same as the order of the current operands the IR user
479 /// has.
480 ///
481 /// The returned cost is defined in terms of \c TargetCostConstants, see its
482 /// comments for a detailed explanation of the cost values.
485 TargetCostKind CostKind) const;
486
487 /// This is a helper function which calls the three-argument
488 /// getInstructionCost with \p Operands which are the current operands U has.
490 TargetCostKind CostKind) const {
491 SmallVector<const Value *, 4> Operands(U->operand_values());
492 return getInstructionCost(U, Operands, CostKind);
493 }
494
495 /// If a branch or a select condition is skewed in one direction by more than
496 /// this factor, it is very likely to be predicted correctly.
498
499 /// Returns estimated penalty of a branch misprediction in latency. Indicates
500 /// how aggressive the target wants for eliminating unpredictable branches. A
501 /// zero return value means extra optimization applied to them should be
502 /// minimal.
504
505 /// Return true if branch divergence exists.
506 ///
507 /// Branch divergence has a significantly negative impact on GPU performance
508 /// when threads in the same wavefront take different paths due to conditional
509 /// branches.
510 ///
511 /// If \p F is passed, provides a context function. If \p F is known to only
512 /// execute in a single threaded environment, the target may choose to skip
513 /// uniformity analysis and assume all values are uniform.
514 LLVM_ABI bool hasBranchDivergence(const Function *F = nullptr) const;
515
516 /// Get target-specific uniformity information for a value.
517 /// This allows targets to provide more fine-grained control over
518 /// uniformity analysis by specifying whether specific values
519 /// should always or never be considered uniform, or require custom
520 /// operand-based analysis.
521 /// \param V The value to query for uniformity information.
522 /// \return ValueUniformity.
524
525 /// Query the target whether the specified address space cast from FromAS to
526 /// ToAS is valid.
527 LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
528
529 /// Return false if a \p AS0 address cannot possibly alias a \p AS1 address.
530 LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const;
531
532 /// Returns the address space ID for a target's 'flat' address space. Note
533 /// this is not necessarily the same as addrspace(0), which LLVM sometimes
534 /// refers to as the generic address space. The flat address space is a
535 /// generic address space that can be used to access multiple segments of memory
536 /// with different address spaces. Access of a memory location through a
537 /// pointer with this address space is expected to be legal but slower
538 /// compared to the same memory location accessed through a pointer with a
539 /// different address space.
540 //
541 /// This is for targets with different pointer representations which can
542 /// be converted with the addrspacecast instruction. If a pointer is converted
543 /// to this address space, optimizations should attempt to replace the access
544 /// with the source address space.
545 ///
546 /// \returns ~0u if the target does not have such a flat address space to
547 /// optimize away.
548 LLVM_ABI unsigned getFlatAddressSpace() const;
549
550 /// Return any intrinsic address operand indexes which may be rewritten if
551 /// they use a flat address space pointer.
552 ///
553 /// \returns true if the intrinsic was handled.
555 Intrinsic::ID IID) const;
556
557 LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
558
559 // Given an address space cast of the given pointer value, calculate the known
560 // bits of the source pointer in the source addrspace and the destination
561 // pointer in the destination addrspace.
562 LLVM_ABI std::pair<KnownBits, KnownBits>
563 computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const;
564
565 // Given an address space cast, calculate the known bits of the resulting ptr
566 // in the destination addrspace using the known bits of the source pointer in
567 // the source addrspace.
569 unsigned FromAS, unsigned ToAS, const KnownBits &FromPtrBits) const;
570
571 /// Returns a mask indicating which bits of a pointer remain unchanged when
572 /// casting between address spaces. The returned APInt has the same bit width
573 /// as the source address space pointer size.
574 ///
575 /// Some targets allow certain bits of a pointer to change (e.g., the low
576 /// bits within a page) while still preserving the address space. This mask
577 /// identifies those bits that are guaranteed to be preserved. If the mask is
578 /// all zeros, no bits are preserved and address space inference cannot be
579 /// performed safely.
580 ///
581 /// For example, given:
582 /// %gp = addrspacecast ptr addrspace(2) %sp to ptr
583 /// %a = ptrtoint ptr %gp to i64
584 /// %b = xor i64 7, %a
585 /// %gp2 = inttoptr i64 %b to ptr
586 /// store i16 0, ptr %gp2, align 2
587 /// if the target preserves the upper bits, `%gp2` can be safely replaced
588 /// with `inttoptr i64 %b to ptr addrspace(2)`.
590 unsigned DstAS) const;
591
592 /// Return true if globals in this address space can have initializers other
593 /// than `undef`.
594 LLVM_ABI bool
596
597 LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const;
598
599 LLVM_ABI bool isSingleThreaded() const;
600
601 LLVM_ABI std::pair<const Value *, unsigned>
602 getPredicatedAddrSpace(const Value *V) const;
603
604 /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
605 /// NewV, which has a different address space. This should happen for every
606 /// operand index that collectFlatAddressOperands returned for the intrinsic.
607 /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
608 /// new value (which may be the original \p II with modified operands).
610 Value *OldV,
611 Value *NewV) const;
612
613 /// Test whether calls to a function lower to actual program function
614 /// calls.
615 ///
616 /// The idea is to test whether the program is likely to require a 'call'
617 /// instruction or equivalent in order to call the given function.
618 ///
619 /// FIXME: It's not clear that this is a good or useful query API. Client's
620 /// should probably move to simpler cost metrics using the above.
621 /// Alternatively, we could split the cost interface into distinct code-size
622 /// and execution-speed costs. This would allow modelling the core of this
623 /// query more accurately as a call is a single small instruction, but
624 /// incurs significant execution cost.
625 LLVM_ABI bool isLoweredToCall(const Function *F) const;
626
627 struct LSRCost {
628 /// TODO: Some of these could be merged. Also, a lexical ordering
629 /// isn't always optimal.
/// Number of instructions needed by the candidate LSR solution.
630 unsigned Insns;
/// Number of registers the solution requires.
631 unsigned NumRegs;
/// Cost contribution of add-recurrence (AddRec) expressions.
632 unsigned AddRecCost;
/// Number of induction-variable multiplications in the solution.
633 unsigned NumIVMuls;
/// Number of base-address additions the solution needs.
634 unsigned NumBaseAdds;
/// Cost of materializing immediate operands.
635 unsigned ImmCost;
/// One-time cost of setting up the solution outside the loop.
636 unsigned SetupCost;
/// Cost attributed to scaled addressing modes.
637 unsigned ScaleCost;
638 };
639
640 /// Parameters that control the generic loop unrolling transformation.
642 /// The cost threshold for the unrolled loop. Should be relative to the
643 /// getInstructionCost values returned by this API, and the expectation is
644 /// that the unrolled loop's instructions when run through that interface
645 /// should not exceed this cost. However, this is only an estimate. Also,
646 /// specific loops may be unrolled even with a cost above this threshold if
647 /// deemed profitable. Set this to UINT_MAX to disable the loop body cost
648 /// restriction.
649 unsigned Threshold;
650 /// If complete unrolling will reduce the cost of the loop, we will boost
651 /// the Threshold by a certain percent to allow more aggressive complete
652 /// unrolling. This value provides the maximum boost percentage that we
653 /// can apply to Threshold (The value should be no less than 100).
654 /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
655 /// MaxPercentThresholdBoost / 100)
656 /// E.g. if complete unrolling reduces the loop execution time by 50%
657 /// then we boost the threshold by the factor of 2x. If unrolling is not
658 /// expected to reduce the running time, then we do not increase the
659 /// threshold.
661 /// The cost threshold for the unrolled loop when optimizing for size (set
662 /// to UINT_MAX to disable).
664 /// The cost threshold for the unrolled loop, like Threshold, but used
665 /// for partial/runtime unrolling (set to UINT_MAX to disable).
667 /// The cost threshold for the unrolled loop when optimizing for size, like
668 /// OptSizeThreshold, but used for partial/runtime unrolling (set to
669 /// UINT_MAX to disable).
671 /// A forced unrolling factor (the number of concatenated bodies of the
672 /// original loop in the unrolled loop body). When set to 0, the unrolling
673 /// transformation will select an unrolling factor based on the current cost
674 /// threshold and other factors.
675 unsigned Count;
676 /// Default unroll count for loops with run-time trip count.
678 // Set the maximum unrolling factor. The unrolling factor may be selected
679 // using the appropriate cost threshold, but may not exceed this number
680 // (set to UINT_MAX to disable). This does not apply in cases where the
681 // loop is being fully unrolled.
682 unsigned MaxCount;
683 /// Set the maximum upper bound of trip count. Allowing the MaxUpperBound
684 /// to be overridden by a target gives more flexibility in certain cases.
685 /// By default, MaxUpperBound uses UnrollMaxUpperBound, whose value is 8.
687 /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
688 /// applies even if full unrolling is selected. This allows a target to fall
689 /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
691 // Represents number of instructions optimized when "back edge"
692 // becomes "fall through" in unrolled loop.
693 // For now we count a conditional branch on a backedge and a comparison
694 // feeding it.
695 unsigned BEInsns;
696 /// Allow partial unrolling (unrolling of loops to expand the size of the
697 /// loop body, not only to eliminate small constant-trip-count loops).
699 /// Allow runtime unrolling (unrolling of loops to expand the size of the
700 /// loop body even when the number of loop iterations is not known at
701 /// compile time).
703 /// Allow generation of a loop remainder (extra iterations after unroll).
705 /// Allow emitting expensive instructions (such as divisions) when computing
706 /// the trip count of a loop for runtime unrolling.
708 /// Apply loop unroll on any kind of loop
709 /// (mainly to loops that fail runtime unrolling).
710 bool Force;
711 /// Allow using trip count upper bound to unroll loops.
713 /// Allow unrolling of all the iterations of the runtime loop remainder.
715 /// Allow unroll and jam. Used to enable unroll and jam for the target.
717 /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
718 /// value above is used during unroll and jam for the outer loop size.
719 /// This value is used in the same manner to limit the size of the inner
720 /// loop.
722 /// Don't allow loop unrolling to simulate more than this number of
723 /// iterations when checking full unroll profitability
725 /// Disable runtime unrolling by default for vectorized loops.
727 /// Don't allow runtime unrolling if expanding the trip count takes more
728 /// than SCEVExpansionBudget.
730 /// Allow runtime unrolling multi-exit loops. Should only be set if the
731 /// target determined that multi-exit unrolling is profitable for the loop.
732 /// Fall back to the generic logic to determine whether multi-exit unrolling
733 /// is profitable if set to false.
735 /// Allow unrolling to add parallel reduction phis.
737 };
738
739 /// Get target-customized preferences for the generic loop unrolling
740 /// transformation. The caller will initialize UP with the current
741 /// target-independent defaults.
744 OptimizationRemarkEmitter *ORE) const;
745
746 /// Query the target whether it would be profitable to convert the given loop
747 /// into a hardware loop.
749 AssumptionCache &AC,
750 TargetLibraryInfo *LibInfo,
751 HardwareLoopInfo &HWLoopInfo) const;
752
753 // Query the target for which minimum vectorization factor epilogue
754 // vectorization should be considered.
756
757 /// Query the target whether it would be preferred to create a predicated
758 /// vector loop, which can avoid the need to emit a scalar epilogue loop.
760
761 /// Query the target what the preferred style of tail folding is.
763
764 // Parameters that control the loop peeling transformation
766 /// A forced peeling factor (the number of bodies of the original loop
767 /// that should be peeled off before the loop body). When set to 0, a
768 /// peeling factor is chosen based on profile information and other factors.
769 unsigned PeelCount;
770 /// Allow peeling off loop iterations.
772 /// Allow peeling off loop iterations for loop nests.
774 /// Allow peeling basing on profile. Uses to enable peeling off all
775 /// iterations basing on provided profile.
776 /// If the value is true the peeling cost model can decide to peel only
777 /// some iterations and in this case it will set this to false.
779
780 /// Peel off the last PeelCount loop iterations.
782 };
783
784 /// Get target-customized preferences for the generic loop peeling
785 /// transformation. The caller will initialize \p PP with the current
786 /// target-independent defaults with information from \p L and \p SE.
788 PeelingPreferences &PP) const;
789
790 /// Targets can implement their own combinations for target-specific
791 /// intrinsics. This function will be called from the InstCombine pass every
792 /// time a target-specific intrinsic is encountered.
793 ///
794 /// \returns std::nullopt to not do anything target specific or a value that
795 /// will be returned from the InstCombiner. It is possible to return null and
796 /// stop further processing of the intrinsic by returning nullptr.
797 LLVM_ABI std::optional<Instruction *>
799 /// Can be used to implement target-specific instruction combining.
800 /// \see instCombineIntrinsic
801 LLVM_ABI std::optional<Value *>
803 APInt DemandedMask, KnownBits &Known,
804 bool &KnownBitsComputed) const;
805 /// Can be used to implement target-specific instruction combining.
806 /// \see instCombineIntrinsic
807 LLVM_ABI std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
808 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
809 APInt &UndefElts2, APInt &UndefElts3,
810 std::function<void(Instruction *, unsigned, APInt, APInt &)>
811 SimplifyAndSetOp) const;
812 /// @}
813
814 /// \name Scalar Target Information
815 /// @{
816
817 /// Flags indicating the kind of support for population count.
818 ///
819 /// Compared to the SW implementation, HW support is supposed to
820 /// significantly boost the performance when the population is dense, and it
821 /// may or may not degrade performance if the population is sparse. A HW
822 /// support is considered as "Fast" if it can outperform, or is on a par
823 /// with, SW implementation when the population is sparse; otherwise, it is
824 /// considered as "Slow".
826
827 /// Return true if the specified immediate is legal add immediate, that
828 /// is the target has add instructions which can add a register with the
829 /// immediate without having to materialize the immediate into a register.
830 LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const;
831
832 /// Return true if adding the specified scalable immediate is legal, that is
833 /// the target has add instructions which can add a register with the
834 /// immediate (multiplied by vscale) without having to materialize the
835 /// immediate into a register.
836 LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const;
837
838 /// Return true if the specified immediate is legal icmp immediate,
839 /// that is the target has icmp instructions which can compare a register
840 /// against the immediate without having to materialize the immediate into a
841 /// register.
842 LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const;
843
844 /// Return true if the addressing mode represented by AM is legal for
845 /// this target, for a load/store of the specified type.
846 /// The type may be VoidTy, in which case only return true if the addressing
847 /// mode is legal for a load/store of any legal type.
848 /// If target returns true in LSRWithInstrQueries(), I may be valid.
849 /// \param ScalableOffset represents a quantity of bytes multiplied by vscale,
850 /// an invariant value known only at runtime. Most targets should not accept
851 /// a scalable offset.
852 ///
853 /// TODO: Handle pre/postinc as well.
855 int64_t BaseOffset, bool HasBaseReg,
856 int64_t Scale, unsigned AddrSpace = 0,
857 Instruction *I = nullptr,
858 int64_t ScalableOffset = 0) const;
859
860 /// Return true if LSR cost of C1 is lower than C2.
862 const TargetTransformInfo::LSRCost &C2) const;
863
864 /// Return true if LSR major cost is number of registers. Targets which
865 /// implement their own isLSRCostLess and unset number of registers as major
866 /// cost should return false, otherwise return true.
868
869 /// Return true if LSR should drop a found solution if it's calculated to be
870 /// less profitable than the baseline.
872
873 /// \returns true if LSR should not optimize a chain that includes \p I.
875
876 /// Return true if the target can fuse a compare and branch.
877 /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
878 /// calculation for the instructions in a loop.
879 LLVM_ABI bool canMacroFuseCmp() const;
880
881 /// Return true if the target can save a compare for loop count, for example
882 /// hardware loop saves a compare.
885 TargetLibraryInfo *LibInfo) const;
886
887 /// Which addressing mode Loop Strength Reduction will try to generate.
889 AMK_None = 0x0, ///< Don't prefer any addressing mode
890 AMK_PreIndexed = 0x1, ///< Prefer pre-indexed addressing mode
891 AMK_PostIndexed = 0x2, ///< Prefer post-indexed addressing mode
892 AMK_All = 0x3, ///< Consider all addressing modes
893 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/AMK_All)
894 };
895
896 /// Return the preferred addressing mode LSR should make efforts to generate.
899
900 /// Some targets only support masked load/store with a constant mask.
905
906 /// Return true if the target supports masked store.
907 LLVM_ABI bool
908 isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace,
910 /// Return true if the target supports masked load.
911 LLVM_ABI bool
912 isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace,
914
915 /// Return true if the target supports nontemporal store.
916 LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
917 /// Return true if the target supports nontemporal load.
918 LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const;
919
920 /// \returns true if the target supports broadcasting a load to a vector of
921 /// type <NumElements x ElementTy>.
922 LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy,
923 ElementCount NumElements) const;
924
925 /// Return true if the target supports masked scatter.
926 LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
927 /// Return true if the target supports masked gather.
928 LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
929 /// Return true if the target forces scalarizing of llvm.masked.gather
930 /// intrinsics.
932 Align Alignment) const;
933 /// Return true if the target forces scalarizing of llvm.masked.scatter
934 /// intrinsics.
936 Align Alignment) const;
937
938 /// Return true if the target supports masked compress store.
940 Align Alignment) const;
941 /// Return true if the target supports masked expand load.
942 LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;
943
944 /// Return true if the target supports strided load.
945 LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const;
946
947 /// Return true if the target supports interleaved access for the given vector
948 /// type \p VTy, interleave factor \p Factor, alignment \p Alignment and
949 /// address space \p AddrSpace.
950 LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
951 Align Alignment,
952 unsigned AddrSpace) const;
953
954 /// Return true if the target supports masked vector histograms.
956 Type *DataType) const;
957
958 /// Return true if this is an alternating opcode pattern that can be lowered
959 /// to a single instruction on the target. In X86 this is for the addsub
960 /// instruction which corresponds to a Shuffle + Fadd + FSub pattern in IR.
961 /// This function expects two opcodes: \p Opcode0 and \p Opcode1 being
962 /// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
963 /// when \p Opcode0 is selected and `1` when \p Opcode1 is selected.
964 /// \p VecTy is the vector type of the instruction to be generated.
965 LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
966 unsigned Opcode1,
967 const SmallBitVector &OpcodeMask) const;
968
969 /// Return true if we should be enabling ordered reductions for the target.
971
972 /// Return true if the target has a unified operation to calculate division
973 /// and remainder. If so, the additional implicit multiplication and
974 /// subtraction required to calculate a remainder from division are free. This
975 /// can enable more aggressive transformations for division and remainder than
976 /// would typically be allowed using throughput or size cost models.
977 LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const;
978
979 /// Return true if the given instruction (assumed to be a memory access
980 /// instruction) has a volatile variant. If that's the case then we can avoid
981 /// addrspacecast to generic AS for volatile loads/stores. Default
982 /// implementation returns false, which prevents address space inference for
983 /// volatile loads/stores.
984 LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;
985
986 /// Return true if target doesn't mind addresses in vectors.
988
989 /// Return the cost of the scaling factor used in the addressing
990 /// mode represented by AM for this target, for a load/store
991 /// of the specified type.
992 /// If the AM is supported, the return value must be >= 0.
993 /// If the AM is not supported, it returns a negative value.
994 /// TODO: Handle pre/postinc as well.
996 StackOffset BaseOffset,
997 bool HasBaseReg, int64_t Scale,
998 unsigned AddrSpace = 0) const;
999
1000 /// Return true if the loop strength reduce pass should make
1001 /// Instruction* based TTI queries to isLegalAddressingMode(). This is
1002 /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
1003 /// immediate offset and no index register.
1004 LLVM_ABI bool LSRWithInstrQueries() const;
1005
1006 /// Return true if it's free to truncate a value of type Ty1 to type
1007 /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
1008 /// by referencing its sub-register AX.
1009 LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const;
1010
1011 /// Return true if it is profitable to hoist instruction in the
1012 /// then/else to before if.
1014
1015 LLVM_ABI bool useAA() const;
1016
1017 /// Return true if this type is legal.
1018 LLVM_ABI bool isTypeLegal(Type *Ty) const;
1019
1020 /// Returns the estimated number of registers required to represent \p Ty.
1021 LLVM_ABI unsigned getRegUsageForType(Type *Ty) const;
1022
1023 /// Return true if switches should be turned into lookup tables for the
1024 /// target.
1025 LLVM_ABI bool shouldBuildLookupTables() const;
1026
1027 /// Return true if switches should be turned into lookup tables
1028 /// containing this constant value for the target.
1030
1031 /// Return true if lookup tables should be turned into relative lookup tables.
1033
1034 /// Return true if the input function which is cold at all call sites,
1035 /// should use coldcc calling convention.
1037
1038 /// Return true if the input function is internal, should use fastcc calling
1039 /// convention.
1041
1043
1044 /// Identifies if the vector form of the intrinsic has a scalar operand.
1046 unsigned ScalarOpdIdx) const;
1047
1048 /// Identifies if the vector form of the intrinsic is overloaded on the type
1049 /// of the operand at index \p OpdIdx, or on the return type if \p OpdIdx is
1050 /// -1.
1052 int OpdIdx) const;
1053
1054 /// Identifies if the vector form of the intrinsic that returns a struct is
1055 /// overloaded at the struct element index \p RetIdx.
1056 LLVM_ABI bool
1058 int RetIdx) const;
1059
1060 /// Represents a hint about the context in which an insert/extract is used.
1061 ///
1062 /// On some targets, inserts/extracts can cheaply be folded into loads/stores.
1063 ///
1064 /// This enum allows the vectorizer to give getVectorInstrCost an idea of how
1065 /// inserts/extracts are used
1066 ///
1067 /// See \c getVectorInstrContextHint to compute a VectorInstrContext from an
1068 /// insert/extract Instruction*.
1070 None, ///< The insert/extract is not used with a load/store.
1071 Load, ///< The value being inserted comes from a load (InsertElement only).
1072 Store, ///< The extracted value is stored (ExtractElement only).
1073 };
1074
1075 /// Calculates a VectorInstrContext from \p I.
1077
1078 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
1079 /// are set if the demanded result elements need to be inserted and/or
1080 /// extracted from vectors. The involved values may be passed in VL if
1081 /// Insert is true.
1083 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
1084 TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
1085 ArrayRef<Value *> VL = {},
1087
1088 /// Estimate the overhead of scalarizing operands with the given types. The
1089 /// (potentially vector) types to use for each of argument are passes via Tys.
1093
1094 /// If target has efficient vector element load/store instructions, it can
1095 /// return true here so that insertion/extraction costs are not added to
1096 /// the scalarization cost of a load/store.
1098
1099 /// If the target supports tail calls.
1100 LLVM_ABI bool supportsTailCalls() const;
1101
1102 /// If target supports tail call on \p CB
1103 LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const;
1104
1105 /// Don't restrict interleaved unrolling to small loops.
1106 LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const;
1107
1108 /// Returns options for expansion of memcmp. IsZeroCmp is
1109 /// true if this is the expansion of memcmp(p1, p2, s) == 0.
1111 // True when memcmp expansion is enabled, i.e. at least one load
// operation is permitted (MaxNumLoads > 0).
1112 operator bool() const { return MaxNumLoads > 0; }
1113
1114 // Maximum number of load operations.
1115 unsigned MaxNumLoads = 0;
1116
1117 // The list of available load sizes (in bytes), sorted in decreasing order.
1119
1120 // For memcmp expansion when the memcmp result is only compared equal or
1121 // not-equal to 0, allow up to this number of load pairs per block. As an
1122 // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
1123 // a0 = load2bytes &a[0]
1124 // b0 = load2bytes &b[0]
1125 // a2 = load1byte &a[2]
1126 // b2 = load1byte &b[2]
1127 // r = cmp eq (a0 ^ b0 | a2 ^ b2), 0
1128 unsigned NumLoadsPerBlock = 1;
1129
1130 // Set to true to allow overlapping loads. For example, 7-byte compares can
1131 // be done with two 4-byte compares instead of 4+2+1-byte compares. This
1132 // requires all loads in LoadSizes to be doable in an unaligned way.
1134
1135 // Sometimes, the amount of data that needs to be compared is smaller than
1136 // the standard register size, but it cannot be loaded with just one load
1137 // instruction. For example, if the size of the memory comparison is 6
1138 // bytes, we can handle it more efficiently by loading all 6 bytes in a
1139 // single block and generating an 8-byte number, instead of generating two
1140 // separate blocks with conditional jumps for 4 and 2 byte loads. This
1141 // approach simplifies the process and produces the comparison result as
1142 // normal. This array lists the allowed sizes of memcmp tails that can be
1143 // merged into one block.
1145 };
1147 bool IsZeroCmp) const;
1148
1149 /// Should the Select Optimization pass be enabled and ran.
1150 LLVM_ABI bool enableSelectOptimize() const;
1151
1152 /// Should the Select Optimization pass treat the given instruction like a
1153 /// select, potentially converting it to a conditional branch. This can
1154 /// include select-like instructions like or(zext(c), x) that can be converted
1155 /// to selects.
1157
1158 /// Enable matching of interleaved access groups.
1160
1161 /// Enable matching of interleaved access groups that contain predicated
1162 /// accesses or gaps and therefore vectorized using masked
1163 /// vector loads/stores.
1165
1166 /// Indicate that it is potentially unsafe to automatically vectorize
1167 /// floating-point operations because vector and scalar floating-point
1168 /// semantics may differ. For example, ARM NEON v7 SIMD math
1169 /// does not support IEEE-754 denormal numbers, while depending on the
1170 /// platform, scalar floating-point math does.
1171 /// This applies to floating-point math operations and calls, not memory
1172 /// operations, shuffles, or casts.
1174
1175 /// Determine if the target supports unaligned memory accesses.
1177 unsigned BitWidth,
1178 unsigned AddressSpace = 0,
1179 Align Alignment = Align(1),
1180 unsigned *Fast = nullptr) const;
1181
1182 /// Return hardware support for population count.
1183 LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
1184
1185 /// Return true if the hardware has a fast square-root instruction.
1186 LLVM_ABI bool haveFastSqrt(Type *Ty) const;
1187
1188 /// Return true if the cost of the instruction is too high to speculatively
1189 /// execute and should be kept behind a branch.
1190 /// This normally just wraps around a getInstructionCost() call, but some
1191 /// targets might report a low TCK_SizeAndLatency value that is incompatible
1192 /// with the fixed TCC_Expensive value.
1193 /// NOTE: This assumes the instruction passes isSafeToSpeculativelyExecute().
1195
1196 /// Return true if it is faster to check if a floating-point value is NaN
1197 /// (or not-NaN) versus a comparison against a constant FP zero value.
1198 /// Targets should override this if materializing a 0.0 for comparison is
1199 /// generally as cheap as checking for ordered/unordered.
1201
1202 /// Return the expected cost of supporting the floating point operation
1203 /// of the specified type.
1205
1206 /// Return the expected cost of materializing for the given integer
1207 /// immediate of the specified type.
1209 TargetCostKind CostKind) const;
1210
1211 /// Return the expected cost of materialization for the given integer
1212 /// immediate of the specified type for a given instruction. The cost can be
1213 /// zero if the immediate can be folded into the specified instruction.
1214 LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
1215 const APInt &Imm, Type *Ty,
1217 Instruction *Inst = nullptr) const;
1219 const APInt &Imm, Type *Ty,
1220 TargetCostKind CostKind) const;
1221
1222 /// Return the expected cost for the given integer when optimising
1223 /// for size. This is different than the other integer immediate cost
1224 /// functions in that it is subtarget agnostic. This is useful when you e.g.
1225 /// target one ISA such as Aarch32 but smaller encodings could be possible
1226 /// with another such as Thumb. This return value is used as a penalty when
1227 /// the total costs for a constant is calculated (the bigger the cost, the
1228 /// more beneficial constant hoisting is).
1229 LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
1230 const APInt &Imm,
1231 Type *Ty) const;
1232
1233 /// It can be advantageous to detach complex constants from their uses to make
1234 /// their generation cheaper. This hook allows targets to report when such
1235 /// transformations might negatively affect the code generation of the
1236 /// underlying operation. The motivating example is divides whereby hoisting
1237 /// constants prevents the code generator's ability to transform them into
1238 /// combinations of simpler operations.
1240 const Function &Fn) const;
1241
1242 /// @}
1243
1244 /// \name Vector Target Information
1245 /// @{
1246
1247 /// The various kinds of shuffle patterns for vector queries.
1249 SK_Broadcast, ///< Broadcast element 0 to all other elements.
1250 SK_Reverse, ///< Reverse the order of the vector.
1251 SK_Select, ///< Selects elements from the corresponding lane of
1252 ///< either source operand. This is equivalent to a
1253 ///< vector select with a constant condition operand.
1254 SK_Transpose, ///< Transpose two vectors.
1255 SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
1256 SK_ExtractSubvector, ///< ExtractSubvector Index indicates start offset.
1257 SK_PermuteTwoSrc, ///< Merge elements from two source vectors into one
1258 ///< with any shuffle mask.
1259 SK_PermuteSingleSrc, ///< Shuffle elements of single source vector with any
1260 ///< shuffle mask.
1261 SK_Splice ///< Concatenates elements from the first input vector
1262 ///< with elements of the second input vector. Returning
1263 ///< a vector of the same type as the input vectors.
1264 ///< Index indicates start offset in first input vector.
1265 };
1266
1267 /// Additional information about an operand's possible values.
1269 OK_AnyValue, // Operand can have any value.
1270 OK_UniformValue, // Operand is uniform (splat of a value).
1271 OK_UniformConstantValue, // Operand is uniform constant.
1272 OK_NonUniformConstantValue // Operand is a non uniform constant value.
1273 };
1274
1275 /// Additional properties of an operand's values.
1281
1282 // Describe the values an operand can take. We're in the process
1283 // of migrating uses of OperandValueKind and OperandValueProperties
1284 // to use this class, and then will change the internal representation.
1288
1289 bool isConstant() const {
1291 }
1292 bool isUniform() const {
1294 }
// True when the operand's value is known to be a power of two
// (Properties == OP_PowerOf2); see OperandValueProperties.
1295 bool isPowerOf2() const {
1296 return Properties == OP_PowerOf2;
1297 }
1298 bool isNegatedPowerOf2() const {
1300 }
1301
1303 return {Kind, OP_None};
1304 }
1305
1307 OperandValueKind MergeKind = OK_AnyValue;
1308 if (isConstant() && OpInfoY.isConstant())
1309 MergeKind = OK_NonUniformConstantValue;
1310
1311 OperandValueProperties MergeProp = OP_None;
1312 if (Properties == OpInfoY.Properties)
1313 MergeProp = Properties;
1314 return {MergeKind, MergeProp};
1315 }
1316 };
1317
1318 /// \return the number of registers in the target-provided register class.
1319 LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const;
1320
1321 /// \return true if the target supports load/store that enables fault
1322 /// suppression of memory operands when the source condition is false.
1323 LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const;
1324
1325 /// \return the target-provided register class ID for the provided type,
1326 /// accounting for type promotion and other type-legalization techniques that
1327 /// the target might apply. However, it specifically does not account for the
1328 /// scalarization or splitting of vector types. Should a vector type require
1329 /// scalarization or splitting into multiple underlying vector registers, that
1330 /// type should be mapped to a register class containing no registers.
1331 /// Specifically, this is designed to provide a simple, high-level view of the
1332 /// register allocation later performed by the backend. These register classes
1333 /// don't necessarily map onto the register classes used by the backend.
1334 /// FIXME: It's not currently possible to determine how many registers
1335 /// are used by the provided type.
1337 Type *Ty = nullptr) const;
1338
1339 /// \return the target-provided register class name
1340 LLVM_ABI const char *getRegisterClassName(unsigned ClassID) const;
1341
1342 /// \return the cost of spilling a register in the target-provided register
1343 /// class to the stack.
1345 getRegisterClassSpillCost(unsigned ClassID, TargetCostKind CostKind) const;
1346
1347 /// \return the cost of reloading a register in the target-provided register
1348 /// class from the stack.
1350 getRegisterClassReloadCost(unsigned ClassID, TargetCostKind CostKind) const;
1351
1353
1354 /// \return The width of the largest scalar or vector register type.
1355 LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const;
1356
1357 /// \return The width of the smallest vector register type.
1358 LLVM_ABI unsigned getMinVectorRegisterBitWidth() const;
1359
1360 /// \return The maximum value of vscale if the target specifies an
1361 /// architectural maximum vector length, and std::nullopt otherwise.
1362 LLVM_ABI std::optional<unsigned> getMaxVScale() const;
1363
1364 /// \return the value of vscale to tune the cost model for.
1365 LLVM_ABI std::optional<unsigned> getVScaleForTuning() const;
1366
1367 /// \return True if the vectorization factor should be chosen to
1368 /// make the vector of the smallest element type match the size of a
1369 /// vector register. For wider element types, this could result in
1370 /// creating vectors that span multiple vector registers.
1371 /// If false, the vectorization factor will be chosen based on the
1372 /// size of the widest element type.
1373 /// \p K Register Kind for vectorization.
1374 LLVM_ABI bool
1376
1377 /// \return The minimum vectorization factor for types of given element
1378 /// bit width, or 0 if there is no minimum VF. The returned value only
1379 /// applies when shouldMaximizeVectorBandwidth returns true.
1380 /// If IsScalable is true, the returned ElementCount must be a scalable VF.
1381 LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;
1382
1383 /// \return The maximum vectorization factor for types of given element
1384 /// bit width and opcode, or 0 if there is no maximum VF.
1385 /// Currently only used by the SLP vectorizer.
1386 LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
1387
1388 /// \return The minimum vectorization factor for the store instruction. Given
1389 /// the initial estimation of the minimum vector factor and store value type,
1390 /// it tries to find possible lowest VF, which still might be profitable for
1391 /// the vectorization.
1392 /// \param VF Initial estimation of the minimum vector factor.
1393 /// \param ScalarMemTy Scalar memory type of the store operation.
1394 /// \param ScalarValTy Scalar type of the stored value.
1395 /// \param Alignment Alignment of the store
1396 /// \param AddrSpace Address space of the store
1397 /// Currently only used by the SLP vectorizer.
1398 LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
1399 Type *ScalarValTy, Align Alignment,
1400 unsigned AddrSpace) const;
1401
1402 /// \return True if it should be considered for address type promotion.
1403 /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
1404 /// profitable without finding other extensions fed by the same input.
1406 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
1407
1408 /// \return The size of a cache line in bytes.
1409 LLVM_ABI unsigned getCacheLineSize() const;
1410
1411 /// The possible cache levels, used as the key for getCacheSize() and
/// getCacheAssociativity() queries below.
1412 enum class CacheLevel {
1413 L1D, // The L1 data cache
1414 L2D, // The L2 data cache
1415
1416 // We currently do not model L3 caches, as their sizes differ widely between
1417 // microarchitectures. Also, we currently do not have a use for L3 cache
1418 // size modeling yet.
1419 };
1420
1421 /// \return The size of the cache level in bytes, if available.
1422 LLVM_ABI std::optional<unsigned> getCacheSize(CacheLevel Level) const;
1423
1424 /// \return The associativity of the cache level, if available.
1425 LLVM_ABI std::optional<unsigned>
1426 getCacheAssociativity(CacheLevel Level) const;
1427
1428 /// \return The minimum architectural page size for the target.
1429 LLVM_ABI std::optional<unsigned> getMinPageSize() const;
1430
1431 /// \return How much before a load we should place the prefetch
1432 /// instruction. This is currently measured in number of
1433 /// instructions.
1434 LLVM_ABI unsigned getPrefetchDistance() const;
1435
1436 /// Some HW prefetchers can handle accesses up to a certain constant stride.
1437 /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
1438 /// and the arguments provided are meant to serve as a basis for deciding this
1439 /// for a particular loop.
1440 ///
1441 /// \param NumMemAccesses Number of memory accesses in the loop.
1442 /// \param NumStridedMemAccesses Number of the memory accesses that
1443 /// ScalarEvolution could find a known stride
1444 /// for.
1445 /// \param NumPrefetches Number of software prefetches that will be
1446 /// emitted as determined by the addresses
1447 /// involved and the cache line size.
1448 /// \param HasCall True if the loop contains a call.
1449 ///
1450 /// \return This is the minimum stride in bytes where it makes sense to start
1451 /// adding SW prefetches. The default is 1, i.e. prefetch with any
1452 /// stride.
1453 LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1454 unsigned NumStridedMemAccesses,
1455 unsigned NumPrefetches,
1456 bool HasCall) const;
1457
1458 /// \return The maximum number of iterations to prefetch ahead. If
1459 /// the required number of iterations is more than this number, no
1460 /// prefetching is performed.
1461 LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const;
1462
1463 /// \return True if prefetching should also be done for writes.
1464 LLVM_ABI bool enableWritePrefetching() const;
1465
1466 /// \return if target want to issue a prefetch in address space \p AS.
1467 LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const;
1468
1469 /// \return The cost of a partial reduction, which is a reduction from a
1470 /// vector to another vector with fewer elements of larger size. They are
1471 /// represented by the llvm.vector.partial.reduce.add and
1472 /// llvm.vector.partial.reduce.fadd intrinsics, which take an accumulator of
1473 /// type \p AccumType and a second vector operand to be accumulated, whose
1474 /// element count is specified by \p VF. The type of reduction is specified by
1475 /// \p Opcode. The second operand passed to the intrinsic could be the result
1476 /// of an extend, such as sext or zext. In this case \p BinOp is nullopt,
1477 /// \p InputTypeA represents the type being extended and \p OpAExtend the
1478 /// operation, i.e. sign- or zero-extend.
1479 /// For floating-point partial reductions, any fast math flags (FMF) should be
1480 /// provided to govern which reductions are valid to perform (depending on
1481 /// reassoc or contract, for example), whereas this must be nullopt for
1482 /// integer partial reductions.
1483 /// Also, \p InputTypeB should be nullptr and OpBExtend should be None.
1484 /// Alternatively, the second operand could be the result of a binary
1485 /// operation performed on two extends, i.e.
1486 /// mul(zext i8 %a -> i32, zext i8 %b -> i32).
1487 /// In this case \p BinOp may specify the opcode of the binary operation,
1488 /// \p InputTypeA and \p InputTypeB the types being extended, and
1489 /// \p OpAExtend, \p OpBExtend the form of extensions. An example of an
1490 /// operation that uses a partial reduction is a dot product, which reduces
1491 /// two vectors in binary mul operation to another of 4 times fewer and 4
1492 /// times larger elements.
1494 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
1496 PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
1497 TTI::TargetCostKind CostKind, std::optional<FastMathFlags> FMF) const;
1498
1499 /// \return The maximum interleave factor that any transform should try to
1500 /// perform for this target. This number depends on the level of parallelism
1501 /// and the number of execution units in the CPU.
1502 LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const;
1503
1504 /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
1505 LLVM_ABI static OperandValueInfo getOperandInfo(const Value *V);
1506
1507 /// Collect common data between two OperandValueInfo inputs
1508 LLVM_ABI static OperandValueInfo commonOperandInfo(const Value *X,
1509 const Value *Y);
1510
1511 /// This is an approximation of reciprocal throughput of a math/logic op.
1512 /// A higher cost indicates less expected throughput.
1513 /// From Agner Fog's guides, reciprocal throughput is "the average number of
1514 /// clock cycles per instruction when the instructions are not part of a
1515 /// limiting dependency chain."
1516 /// Therefore, costs should be scaled to account for multiple execution units
1517 /// on the target that can process this type of instruction. For example, if
1518 /// there are 5 scalar integer units and 2 vector integer units that can
1519 /// calculate an 'add' in a single cycle, this model should indicate that the
1520 /// cost of the vector add instruction is 2.5 times the cost of the scalar
1521 /// add instruction.
1522 /// \p Args is an optional argument which holds the instruction operands
1523 /// values so the TTI can analyze those values searching for special
1524 /// cases or optimizations based on those values.
1525 /// \p CxtI is the optional original context instruction, if one exists, to
1526 /// provide even more information.
1527 /// \p TLibInfo is used to search for platform specific vector library
1528 /// functions for instructions that might be converted to calls (e.g. frem).
1530 unsigned Opcode, Type *Ty,
1534 ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr,
1535 const TargetLibraryInfo *TLibInfo = nullptr) const;
1536
1537 /// Returns the cost estimation for alternating opcode pattern that can be
1538 /// lowered to a single instruction on the target. In X86 this is for the
1539 /// addsub instruction which corresponds to a Shuffle + Fadd + FSub pattern in
1540 /// IR. This function expects two opcodes: \p Opcode0 and \p Opcode1 being
1541 /// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
1542 /// when \p Opcode0 is selected and `1` when \p Opcode1 is selected.
1543 /// \p VecTy is the vector type of the instruction to be generated.
1545 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
1546 const SmallBitVector &OpcodeMask,
1548
1549 /// \return The cost of a shuffle instruction of kind Kind with inputs of type
1550 /// SrcTy, producing a vector of type DstTy. The exact mask may be passed as
1551 /// Mask, or else the array will be empty. The Index and SubTp parameters
1552 /// are used by the subvector insertions shuffle kinds to show the insert
1553 /// point and the type of the subvector being inserted. The operands of the
1554 /// shuffle can be passed through \p Args, which helps improve the cost
1555 /// estimation in some cases, like in broadcast loads.
1557 ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
1558 ArrayRef<int> Mask = {},
1560 VectorType *SubTp = nullptr, ArrayRef<const Value *> Args = {},
1561 const Instruction *CxtI = nullptr) const;
1562
1563 /// Represents a hint about the context in which a cast is used.
1564 ///
1565 /// For zext/sext, the context of the cast is the operand, which must be a
1566 /// load of some kind. For trunc, the context of the cast is the single
1567 /// user of the instruction, which must be a store of some kind.
1568 ///
1569 /// This enum allows the vectorizer to give getCastInstrCost an idea of the
1570 /// type of cast it's dealing with, as not every cast is equal. For instance,
1571 /// the zext of a load may be free, but the zext of an interleaving load can
1572 /// be (very) expensive!
1573 ///
1574 /// See \c getCastContextHint to compute a CastContextHint from a cast
1575 /// Instruction*. Callers can use it if they don't need to override the
1576 /// context and just want it to be calculated from the instruction.
1577 ///
1578 /// FIXME: This handles the types of load/store that the vectorizer can
1579 /// produce, which are the cases where the context instruction is most
1580 /// likely to be incorrect. There are other situations where that can happen
1581 /// too, which might be handled here but in the long run a more general
1582 /// solution of costing multiple instructions at the same times may be better.
1584 None, ///< The cast is not used with a load/store of any kind.
1585 Normal, ///< The cast is used with a normal load/store.
1586 Masked, ///< The cast is used with a masked load/store.
1587 GatherScatter, ///< The cast is used with a gather/scatter.
1588 Interleave, ///< The cast is used with an interleaved load/store.
1589 Reversed, ///< The cast is used with a reversed load/store.
1590 };
1591
1592 /// Calculates a CastContextHint from \p I.
1593 /// This should be used by callers of getCastInstrCost if they wish to
1594 /// determine the context from some instruction.
1595 /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
1596 /// or if it's another type of cast.
1598
1599 /// \return The expected cost of cast instructions, such as bitcast, trunc,
1600 /// zext, etc. If there is an existing instruction that holds Opcode, it
1601 /// may be passed in the 'I' parameter.
1603 unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH,
1605 const Instruction *I = nullptr) const;
1606
1607 /// \return The expected cost of a sign- or zero-extended vector extract. Use
1608 /// Index = -1 to indicate that there is no information about the index value.
1610 getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1611 unsigned Index, TTI::TargetCostKind CostKind) const;
1612
1613 /// \return The expected cost of control-flow related instructions such as
1614 /// Phi, Ret, Br, Switch.
1617 const Instruction *I = nullptr) const;
1618
1619 /// \returns The expected cost of compare and select instructions. If there
1620 /// is an existing instruction that holds Opcode, it may be passed in the
1621 /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
1622 /// is using a compare with the specified predicate as condition. When vector
1623 /// types are passed, \p VecPred must be used for all lanes. For a
1624 /// comparison, the two operands are the natural values. For a select, the
1625 /// two operands are the *value* operands, not the condition operand.
1627 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
1629 OperandValueInfo Op1Info = {OK_AnyValue, OP_None},
1630 OperandValueInfo Op2Info = {OK_AnyValue, OP_None},
1631 const Instruction *I = nullptr) const;
1632
1633 /// \return The expected cost of vector Insert and Extract.
1634 /// Use -1 to indicate that there is no information on the index value.
1635 /// This is used when the instruction is not available; a typical use
1636 /// case is to provision the cost of vectorization/scalarization in
1637 /// vectorizer passes.
1639 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
1640 unsigned Index = -1, const Value *Op0 = nullptr,
1641 const Value *Op1 = nullptr,
1643
1644 /// \return The expected cost of vector Insert and Extract.
1645 /// Use -1 to indicate that there is no information on the index value.
1646 /// This is used when the instruction is not available; a typical use
1647 /// case is to provision the cost of vectorization/scalarization in
1648 /// vectorizer passes.
1649 /// \param ScalarUserAndIdx encodes the information about extracts from a
1650 /// vector with 'Scalar' being the value being extracted,'User' being the user
1651 /// of the extract(nullptr if user is not known before vectorization) and
1652 /// 'Idx' being the extract lane.
1654 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1655 Value *Scalar,
1656 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx,
1658
1659 /// \return The expected cost of vector Insert and Extract.
1660 /// This is used when instruction is available, and implementation
1661 /// asserts 'I' is not nullptr.
1662 ///
1663 /// A typical suitable use case is cost estimation when vector instruction
1664 /// exists (e.g., from basic blocks during transformation).
1666 const Instruction &I, Type *Val, TTI::TargetCostKind CostKind,
1667 unsigned Index = -1,
1669
1670 /// \return The expected cost of inserting or extracting a lane that is \p
1671 /// Index elements from the end of a vector, i.e. the mathematical expression
1672 /// for the lane is (VF - 1 - Index). This is required for scalable vectors
1673 /// where the exact lane index is unknown at compile time.
1675 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
1676 unsigned Index) const;
1677
1678 /// \return The expected cost of aggregate inserts and extracts. This is
1679 /// used when the instruction is not available; a typical use case is to
1680 /// provision the cost of vectorization/scalarization in vectorizer passes.
1682 unsigned Opcode, TTI::TargetCostKind CostKind) const;
1683
1684 /// \return The cost of replication shuffle of \p VF elements typed \p EltTy
1685 /// \p ReplicationFactor times.
1686 ///
1687 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
1688 /// <0,0,0,1,1,1,2,2,2,3,3,3>
1690 Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
1692
1693 /// \return The cost of Load and Store instructions. The operand info
1694 /// \p OpdInfo should refer to the stored value for stores and the address
1695 /// for loads.
1697 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1700 const Instruction *I = nullptr) const;
1701
1702 /// \return The cost of the interleaved memory operation.
1703 /// \p Opcode is the memory operation code
1704 /// \p VecTy is the vector type of the interleaved access.
1705 /// \p Factor is the interleave factor
1706 /// \p Indices is the indices for interleaved load members (as interleaved
1707 /// load allows gaps)
1708 /// \p Alignment is the alignment of the memory operation
1709 /// \p AddressSpace is address space of the pointer.
1710 /// \p UseMaskForCond indicates if the memory access is predicated.
1711 /// \p UseMaskForGaps indicates if gaps should be masked.
1713 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1714 Align Alignment, unsigned AddressSpace,
1716 bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
1717
1718 /// A helper function to determine the type of reduction algorithm used
1719 /// for a given \p Opcode and set of FastMathFlags \p FMF.
1720 static bool requiresOrderedReduction(std::optional<FastMathFlags> FMF) {
1721 return FMF && !(*FMF).allowReassoc();
1722 }
1723
1724 /// Calculate the cost of vector reduction intrinsics.
1725 ///
1726 /// This is the cost of reducing the vector value of type \p Ty to a scalar
1727 /// value using the operation denoted by \p Opcode. The FastMathFlags
1728 /// parameter \p FMF indicates what type of reduction we are performing:
1729 /// 1. Tree-wise. This is the typical 'fast' reduction performed that
1730 /// involves successively splitting a vector into half and doing the
1731 /// operation on the pair of halves until you have a scalar value. For
1732 /// example:
1733 /// (v0, v1, v2, v3)
1734 /// ((v0+v2), (v1+v3), undef, undef)
1735 /// ((v0+v2+v1+v3), undef, undef, undef)
1736 /// This is the default behaviour for integer operations, whereas for
1737 /// floating point we only do this if \p FMF indicates that
1738 /// reassociation is allowed.
1739 /// 2. Ordered. For a vector with N elements this involves performing N
1740 /// operations in lane order, starting with an initial scalar value, i.e.
1741 /// result = InitVal + v0
1742 /// result = result + v1
1743 /// result = result + v2
1744 /// result = result + v3
1745 /// This is only the case for FP operations and when reassociation is not
1746 /// allowed.
1747 ///
1749 unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
1751
1755
1756 /// Calculate the cost of an extended reduction pattern, similar to
1757 /// getArithmeticReductionCost of an Add/Sub reduction with multiply and
1758 /// optional extensions. This is the cost of:
1759 /// * ResTy vecreduce.add/sub(mul (A, B)) or,
1760 /// * ResTy vecreduce.add/sub(mul(ext(Ty A), ext(Ty B)).
1762 bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
1764
1765 /// Calculate the cost of an extended reduction pattern, similar to
1766 /// getArithmeticReductionCost of a reduction with an extension.
1767 /// This is the cost of:
1768 /// ResTy vecreduce.opcode(ext(Ty A)).
1770 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1771 std::optional<FastMathFlags> FMF,
1773
1774 /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
1775 /// Three cases are handled: 1. scalar instruction 2. vector instruction
1776 /// 3. scalar instruction which is to be vectorized.
1779
1780 /// \returns The cost of memory intrinsic instructions.
1781 /// Used when IntrinsicInst is not materialized.
1785
1786 /// \returns The cost of Call instructions.
1788 Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1790
1791 /// \returns The number of pieces into which the provided type must be
1792 /// split during legalization. Zero is returned when the answer is unknown.
1793 LLVM_ABI unsigned getNumberOfParts(Type *Tp) const;
1794
1795 /// \returns The cost of the address computation. For most targets this can be
1796 /// merged into the instruction indexing mode. Some targets might want to
1797 /// distinguish between address computation for memory operations with vector
1798 /// pointer types and scalar pointer types. Such targets should override this
1799 /// function. \p SE holds the pointer for the scalar evolution object which
1800 /// was used in order to get the Ptr step value. \p Ptr holds the SCEV of the
1801 /// access pointer.
1803 getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr,
1805
1806 /// \returns The cost, if any, of keeping values of the given types alive
1807 /// over a callsite.
1808 ///
1809 /// Some types may require the use of register classes that do not have
1810 /// any callee-saved registers, so would require a spill and fill.
1813
1814 /// \returns True if the intrinsic is a supported memory intrinsic. Info
1815 /// will contain additional information - whether the intrinsic may write
1816 /// or read to memory, volatility and the pointer. Info is undefined
1817 /// if false is returned.
1819 MemIntrinsicInfo &Info) const;
1820
1821 /// \returns The maximum element size, in bytes, for an element
1822 /// unordered-atomic memory intrinsic.
1824
1825 /// \returns A value which is the result of the given memory intrinsic. If \p
1826 /// CanCreate is true, new instructions may be created to extract the result
1827 /// from the given intrinsic memory operation. Returns nullptr if the target
1828 /// cannot create a result from the given intrinsic.
1829 LLVM_ABI Value *
1831 bool CanCreate = true) const;
1832
1833 /// \returns The type to use in a loop expansion of a memcpy call.
1835 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
1836 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
1837 std::optional<uint32_t> AtomicElementSize = std::nullopt) const;
1838
1839 /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
1840 /// \param RemainingBytes The number of bytes to copy.
1841 ///
1842 /// Calculates the operand types to use when copying \p RemainingBytes of
1843 /// memory, where source and destination alignments are \p SrcAlign and
1844 /// \p DestAlign respectively.
1846 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1847 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1848 Align SrcAlign, Align DestAlign,
1849 std::optional<uint32_t> AtomicCpySize = std::nullopt) const;
1850
1851 /// \returns True if the two functions have compatible attributes for inlining
1852 /// purposes.
1853 LLVM_ABI bool areInlineCompatible(const Function *Caller,
1854 const Function *Callee) const;
1855
1856 /// Returns a penalty for invoking call \p Call in \p F.
1857 /// For example, if a function F calls a function G, which in turn calls
1858 /// function H, then getInlineCallPenalty(F, H()) would return the
1859 /// penalty of calling H from F, e.g. after inlining G into F.
1860 /// \p DefaultCallPenalty is passed to give a default penalty that
1861 /// the target can amend or override.
1862 LLVM_ABI unsigned getInlineCallPenalty(const Function *F,
1863 const CallBase &Call,
1864 unsigned DefaultCallPenalty) const;
1865
1866 /// \returns true if `Caller`'s `Attr` should be added to the new function
1867 /// created by outlining part of `Caller`.
1868 LLVM_ABI bool
1870 const Attribute &Attr) const;
1871
1872 /// \returns True if the caller and callee agree on how \p Types will be
1873 /// passed to or returned from the callee.
1875 /// \param Types List of types to check.
1876 LLVM_ABI bool areTypesABICompatible(const Function *Caller,
1877 const Function *Callee,
1878 ArrayRef<Type *> Types) const;
1879
1880 /// The type of load/store indexing.
1882 MIM_Unindexed, ///< No indexing.
1883 MIM_PreInc, ///< Pre-incrementing.
1884 MIM_PreDec, ///< Pre-decrementing.
1885 MIM_PostInc, ///< Post-incrementing.
1886 MIM_PostDec ///< Post-decrementing.
1887 };
1888
1889 /// \returns True if the specified indexed load for the given type is legal.
1890 LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
1891
1892 /// \returns True if the specified indexed store for the given type is legal.
1893 LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
1894
1895 /// \returns The bitwidth of the largest vector type that should be used to
1896 /// load/store in the given address space.
1897 LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
1898
1899 /// \returns True if the load instruction is legal to vectorize.
1901
1902 /// \returns True if the store instruction is legal to vectorize.
1904
1905 /// \returns True if it is legal to vectorize the given load chain.
1906 LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
1907 Align Alignment,
1908 unsigned AddrSpace) const;
1909
1910 /// \returns True if it is legal to vectorize the given store chain.
1911 LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
1912 Align Alignment,
1913 unsigned AddrSpace) const;
1914
1915 /// \returns True if it is legal to vectorize the given reduction kind.
1917 ElementCount VF) const;
1918
1919 /// \returns True if the given type is supported for scalable vectors
1921
1922 /// \returns The new vector factor value if the target doesn't support \p
1923 /// SizeInBytes loads or has a better vector factor.
1924 LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1925 unsigned ChainSizeInBytes,
1926 VectorType *VecTy) const;
1927
1928 /// \returns The new vector factor value if the target doesn't support \p
1929 /// SizeInBytes stores or has a better vector factor.
1930 LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1931 unsigned ChainSizeInBytes,
1932 VectorType *VecTy) const;
1933
1934 /// \returns True if the target prefers fixed width vectorization if the
1935 /// loop vectorizer's cost-model assigns an equal cost to the fixed and
1936 /// scalable version of the vectorized loop.
1937 /// \p IsEpilogue is true if the decision is for the epilogue loop.
1938 LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const;
1939
1940 /// \returns True if target prefers SLP vectorizer with alternate opcode
1941 /// vectorization, false - otherwise.
1943
1944 /// \returns True if the target prefers reductions of \p Kind to be performed
1945 /// in the loop.
1946 LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const;
1947
1948 /// \returns True if the target prefers reductions select kept in the loop
1949 /// when tail folding. i.e.
1950 /// loop:
1951 /// p = phi (0, s)
1952 /// a = add (p, x)
1953 /// s = select (mask, a, p)
1954 /// vecreduce.add(s)
1955 ///
1956 /// As opposed to the normal scheme of p = phi (0, a) which allows the select
1957 /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
1958 /// by the target, this can lead to cleaner code generation.
1960
1961 /// Return true if the loop vectorizer should consider vectorizing an
1962 /// otherwise scalar epilogue loop if the loop already has been vectorized
1963 /// processing \p Iters scalar iterations per vector iteration.
1965
1966 /// \returns True if the loop vectorizer should discard any VFs where the
1967 /// maximum register pressure exceeds getNumberOfRegisters.
1969
1970 /// \returns True if the target wants to expand the given reduction intrinsic
1971 /// into a shuffle sequence.
1973
1975
1976 /// \returns The shuffle sequence pattern used to expand the given reduction
1977 /// intrinsic.
1980
1981 /// \returns the size cost of rematerializing a GlobalValue address relative
1982 /// to a stack reload.
1983 LLVM_ABI unsigned getGISelRematGlobalCost() const;
1984
1985 /// \returns the lower bound of a trip count to decide on vectorization
1986 /// while tail-folding.
1988
1989 /// \returns True if the target supports scalable vectors.
1990 LLVM_ABI bool supportsScalableVectors() const;
1991
1992 /// \return true when scalable vectorization is preferred.
1994
1995 /// \name Vector Predication Information
1996 /// @{
1997 /// Whether the target supports the %evl parameter of VP intrinsic efficiently
1998 /// in hardware. (see LLVM Language Reference - "Vector Predication
1999 /// Intrinsics"). Use of %evl is discouraged when that is not the case.
2000 LLVM_ABI bool hasActiveVectorLength() const;
2001
2002 /// Return true if sinking I's operands to the same basic block as I is
2003 /// profitable, e.g. because the operands can be folded into a target
2004 /// instruction during instruction selection. After calling the function
2005 /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2006 /// come first).
2009
2010 /// Return true if it's significantly cheaper to shift a vector by a uniform
2011 /// scalar than by an amount which will vary across each lane. On x86 before
2012 /// AVX2 for example, there is a "psllw" instruction for the former case, but
2013 /// no simple instruction for a general "a << b" operation on vectors.
2014 /// This should also apply to lowering for vector funnel shifts (rotates).
2016
2019 // keep the predicating parameter
2021 // where legal, discard the predicate parameter
2023 // transform into something else that is also predicating
2025 };
2026
2027 // How to transform the EVL parameter.
2028 // Legal: keep the EVL parameter as it is.
2029 // Discard: Ignore the EVL parameter where it is safe to do so.
2030 // Convert: Fold the EVL into the mask parameter.
2032
2033 // How to transform the operator.
2034 // Legal: The target supports this operator.
2035 // Convert: Convert this to a non-VP operation.
2036 // The 'Discard' strategy is invalid.
2038
2039 bool shouldDoNothing() const {
2040 return (EVLParamStrategy == Legal) && (OpStrategy == Legal);
2041 }
2044 };
2045
2046 /// \returns How the target needs this vector-predicated operation to be
2047 /// transformed.
2049 getVPLegalizationStrategy(const VPIntrinsic &PI) const;
2050 /// @}
2051
2052 /// \returns Whether a 32-bit branch instruction is available in Arm or Thumb
2053 /// state.
2054 ///
2055 /// Used by the LowerTypeTests pass, which constructs an IR inline assembler
2056 /// node containing a jump table in a format suitable for the target, so it
2057 /// needs to know what format of jump table it can legally use.
2058 ///
2059 /// For non-Arm targets, this function isn't used. It defaults to returning
2060 /// false, but it shouldn't matter what it returns anyway.
2061 LLVM_ABI bool hasArmWideBranch(bool Thumb) const;
2062
2063 /// Returns a bitmask constructed from the target-features or fmv-features
2064 /// metadata of a function corresponding to its Arch Extensions.
2065 LLVM_ABI APInt getFeatureMask(const Function &F) const;
2066
2067 /// Returns a bitmask constructed from the target-features or fmv-features
2068 /// metadata of a function corresponding to its FMV priority.
2069 LLVM_ABI APInt getPriorityMask(const Function &F) const;
2070
2071 /// Returns true if this is an instance of a function with multiple versions.
2072 LLVM_ABI bool isMultiversionedFunction(const Function &F) const;
2073
2074 /// \return The maximum number of function arguments the target supports.
2075 LLVM_ABI unsigned getMaxNumArgs() const;
2076
2077 /// \return For an array of given Size, return alignment boundary to
2078 /// pad to. Default is no padding.
2079 LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size,
2080 Type *ArrayType) const;
2081
2082 /// @}
2083
2084 /// Collect kernel launch bounds for \p F into \p LB.
2086 const Function &F,
2087 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const;
2088
2089 /// Returns true if GEP should not be used to index into vectors for this
2090 /// target.
2092
2093 /// Determine if an instruction with Custom uniformity can be proven uniform
2094 /// based on which operands are uniform.
2095 ///
2096 /// \param I The instruction to check.
2097 /// \param UniformArgs A bitvector indicating which operands are known to be
2098 /// uniform (bit N corresponds to operand N).
2099 /// \returns true if the instruction result can be proven uniform given the
2100 /// uniform operands, false otherwise.
2101 LLVM_ABI bool isUniform(const Instruction *I,
2102 const SmallBitVector &UniformArgs) const;
2103
2104private:
2105 std::unique_ptr<const TargetTransformInfoImplBase> TTIImpl;
2106};
2107
2108/// Analysis pass providing the \c TargetTransformInfo.
2109///
2110/// The core idea of the TargetIRAnalysis is to expose an interface through
2111/// which LLVM targets can analyze and provide information about the middle
2112/// end's target-independent IR. This supports use cases such as target-aware
2113/// cost modeling of IR constructs.
2114///
2115/// This is a function analysis because much of the cost modeling for targets
2116/// is done in a subtarget specific way and LLVM supports compiling different
2117/// functions targeting different subtargets in order to support runtime
2118/// dispatch according to the observed subtarget.
2119class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
2120public:
2122
2123 /// Default construct a target IR analysis.
2124 ///
2125 /// This will use the module's datalayout to construct a baseline
2126 /// conservative TTI result.
2128
2129 /// Construct an IR analysis pass around a target-provide callback.
2130 ///
2131 /// The callback will be called with a particular function for which the TTI
2132 /// is needed and must return a TTI object for that function.
2133 LLVM_ABI
2134 TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
2135
2136 // Value semantics. We spell out the constructors for MSVC.
2138 : TTICallback(Arg.TTICallback) {}
2140 : TTICallback(std::move(Arg.TTICallback)) {}
2142 TTICallback = RHS.TTICallback;
2143 return *this;
2144 }
2146 TTICallback = std::move(RHS.TTICallback);
2147 return *this;
2148 }
2149
2151
2152private:
2154 LLVM_ABI static AnalysisKey Key;
2155
2156 /// The callback used to produce a result.
2157 ///
2158 /// We use a completely opaque callback so that targets can provide whatever
2159 /// mechanism they desire for constructing the TTI for a given function.
2160 ///
2161 /// FIXME: Should we really use std::function? It's relatively inefficient.
2162 /// It might be possible to arrange for even stateful callbacks to outlive
2163 /// the analysis and thus use a function_ref which would be lighter weight.
2164 /// This may also be less error prone as the callback is likely to reference
2165 /// the external TargetMachine, and that reference needs to never dangle.
2166 std::function<Result(const Function &)> TTICallback;
2167
2168 /// Helper function used as the callback in the default constructor.
2169 static Result getDefaultTTI(const Function &F);
2170};
2171
2172/// Wrapper pass for TargetTransformInfo.
2173///
2174/// This pass can be constructed from a TTI object which it stores internally
2175/// and is queried by passes.
2177 TargetIRAnalysis TIRA;
2178 std::optional<TargetTransformInfo> TTI;
2179
2180 virtual void anchor();
2181
2182public:
2183 static char ID;
2184
2185 /// We must provide a default constructor for the pass but it should
2186 /// never be used.
2187 ///
2188 /// Use the constructor below or call one of the creation routines.
2190
2192
2194};
2195
2196/// Create an analysis pass wrapper around a TTI object.
2197///
2198/// This analysis pass just holds the TTI instance and makes it available to
2199/// clients.
2202
2203} // namespace llvm
2204
2205#endif
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
#define LLVM_ABI
Definition Compiler.h:213
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
TargetTransformInfo::VPLegalization VPLegalization
static cl::opt< bool > ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false), cl::desc("Force allowance of nested hardware loops"))
static cl::opt< bool > ForceHardwareLoopPHI("force-hardware-loop-phi", cl::Hidden, cl::init(false), cl::desc("Force hardware loop counter to be updated through a phi"))
This header defines various interfaces for pass management in LLVM.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
Value * RHS
Class for arbitrary precision integers.
Definition APInt.h:78
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Class to represent array types.
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
Conditional Branch instruction.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
ImmutablePass class - This class is used to provide information that does not need to be run.
Definition Pass.h:285
ImmutablePass(char &pid)
Definition Pass.h:287
The core instruction combiner logic.
static InstructionCost getInvalid(CostType Val=0)
Class to represent integer types.
Drive the analysis of interleaved memory accesses in the loop.
const SmallVectorImpl< Type * > & getArgTypes() const
const SmallVectorImpl< const Value * > & getArgs() const
LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarCost=InstructionCost::getInvalid(), bool TypeBasedOnly=false)
InstructionCost getScalarizationCost() const
const IntrinsicInst * getInst() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Information for memory intrinsic cost model.
LLVM_ABI MemIntrinsicCostAttributes(Intrinsic::ID Id, Type *DataTy, bool VariableMask, Align Alignment, const Instruction *I=nullptr)
LLVM_ABI MemIntrinsicCostAttributes(Intrinsic::ID Id, Type *DataTy, Align Alignment, unsigned AddressSpace=0)
const Instruction * getInst() const
LLVM_ABI MemIntrinsicCostAttributes(Intrinsic::ID Id, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, const Instruction *I=nullptr)
The optimization diagnostic interface.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
An instruction for storing to memory.
Multiway switch.
Analysis pass providing the TargetTransformInfo.
TargetIRAnalysis(const TargetIRAnalysis &Arg)
TargetIRAnalysis & operator=(const TargetIRAnalysis &RHS)
LLVM_ABI Result run(const Function &F, FunctionAnalysisManager &)
LLVM_ABI TargetIRAnalysis()
Default construct a target IR analysis.
TargetIRAnalysis & operator=(TargetIRAnalysis &&RHS)
TargetIRAnalysis(TargetIRAnalysis &&Arg)
Provides information about what library functions are available for the current target.
Base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
TargetTransformInfoWrapperPass()
We must provide a default constructor for the pass but it should never be used.
TargetTransformInfo & getTTI(const Function &F)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
LLVM_ABI Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const
LLVM_ABI bool isLegalToVectorizeLoad(LoadInst *LI) const
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
LLVM_ABI unsigned getMaxNumArgs() const
LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Return false if an AS0 address cannot possibly alias an AS1 address.
LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const
Return true if the target supports masked scatter.
LLVM_ABI bool shouldBuildLookupTables() const
Return true if switches should be turned into lookup tables for the target.
LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const
LLVM_ABI InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add/...
LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const
LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const
Don't restrict interleaved unrolling to small loops.
LLVM_ABI bool isMultiversionedFunction(const Function &F) const
Returns true if this is an instance of a function with multiple versions.
LLVM_ABI bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const
Return true if it is faster to check if a floating-point value is NaN (or not-NaN) versus a compariso...
LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked store.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const
LLVM_ABI bool preferAlternateOpcodeVectorization() const
LLVM_ABI bool shouldDropLSRSolutionIfLessProfitable() const
Return true if LSR should drop a found solution if it's calculated to be less profitable than the bas...
LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const
Return true if LSR cost of C1 is lower than C2.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI unsigned getPrefetchDistance() const
LLVM_ABI Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize=std::nullopt) const
LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const
Return true if the target supports masked expand load.
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI bool hasBranchDivergence(const Function *F=nullptr) const
Return true if branch divergence exists.
LLVM_ABI bool preferEpilogueVectorization(ElementCount Iters) const
Return true if the loop vectorizer should consider vectorizing an otherwise scalar epilogue loop if t...
LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
bool invalidate(Function &, const PreservedAnalyses &, FunctionAnalysisManager::Invalidator &)
Handle the invalidation of this information.
LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const
Get target-customized preferences for the generic loop unrolling transformation.
LLVM_ABI bool shouldBuildLookupTablesForConstant(Constant *C) const
Return true if switches should be turned into lookup tables containing this constant value for the ta...
LLVM_ABI TailFoldingStyle getPreferredTailFoldingStyle() const
Query the target what the preferred style of tail folding is.
LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const
If target supports tail call on CB.
LLVM_ABI std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
LLVM_ABI bool isProfitableLSRChainElement(Instruction *I) const
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
MaskKind
Some targets only support masked load/store with a constant mask.
LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
Returns a penalty for invoking call Call in F.
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing operands with the given types.
LLVM_ABI bool hasActiveVectorLength() const
LLVM_ABI bool isExpensiveToSpeculativelyExecute(const Instruction *I) const
Return true if the cost of the instruction is too high to speculatively execute and should be kept be...
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const
Return true if the target supports masked gather.
LLVM_ABI ValueUniformity getValueUniformity(const Value *V) const
Get target-specific uniformity information for a value.
static LLVM_ABI OperandValueInfo commonOperandInfo(const Value *X, const Value *Y)
Collect common data between two OperandValueInfo inputs.
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI std::optional< unsigned > getMaxVScale() const
LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const
LLVM_ABI bool allowVectorElementIndexingUsingGEP() const
Returns true if GEP should not be used to index into vectors for this target.
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI bool isSingleThreaded() const
LLVM_ABI std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool enableOrderedReductions() const
Return true if we should be enabling ordered reductions for the target.
InstructionCost getInstructionCost(const User *U, TargetCostKind CostKind) const
This is a helper function which calls the three-argument getInstructionCost with Operands which are t...
LLVM_ABI unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of vector reduction intrinsics.
LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, const Value *Op0=nullptr, const Value *Op1=nullptr, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
LLVM_ABI std::pair< KnownBits, KnownBits > computeKnownBitsAddrSpaceCast(unsigned ToAS, const Value &PtrOp) const
LLVM_ABI bool LSRWithInstrQueries() const
Return true if the loop strength reduce pass should make Instruction* based TTI queries to isLegalAdd...
LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
LLVM_ABI bool shouldConsiderVectorizationRegPressure() const
LLVM_ABI bool enableWritePrefetching() const
LLVM_ABI bool shouldTreatInstructionLikeSelect(const Instruction *I) const
Should the Select Optimization pass treat the given instruction like a select, potentially converting...
LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const
LLVM_ABI InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType=nullptr, TargetCostKind CostKind=TCK_SizeAndLatency) const
Estimate the cost of a GEP operation when lowered.
LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const
Return true is the target supports interleaved access for the given vector type VTy,...
LLVM_ABI unsigned getRegUsageForType(Type *Ty) const
Returns the estimated number of registers required to represent Ty.
LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const
\Returns true if the target supports broadcasting a load to a vector of type <NumElements x ElementTy...
LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
LLVM_ABI InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...
LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
LLVM_ABI InstructionCost getRegisterClassReloadCost(unsigned ClassID, TargetCostKind CostKind) const
LLVM_ABI ReductionShuffle getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
Return hardware support for population count.
LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.gather intrinsics.
LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const
LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
Return true if globals in this address space can have initializers other than undef.
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
LLVM_ABI bool enableMaskedInterleavedAccessVectorization() const
Enable matching of interleaved access groups that contain predicated accesses or gaps and therefore v...
LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const
Return true if the target supports strided load.
LLVM_ABI TargetTransformInfo & operator=(TargetTransformInfo &&RHS)
LLVM_ABI InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
LLVM_ABI bool enableSelectOptimize() const
Should the Select Optimization pass be enabled and ran.
LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...
OperandValueProperties
Additional properties of an operand's values.
LLVM_ABI int getInliningLastCallToStaticBonus() const
LLVM_ABI InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const PointersChainInfo &Info, Type *AccessTy, TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Estimate the cost of a chain of pointers (typically pointer operands of a chain of loads or stores wi...
LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const
LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
LLVM_ABI bool isTypeLegal(Type *Ty) const
Return true if this type is legal.
static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)
A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...
LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
LLVM_ABI std::optional< unsigned > getCacheAssociativity(CacheLevel Level) const
LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal load.
LLVM_ABI bool isUniform(const Instruction *I, const SmallBitVector &UniformArgs) const
Determine if an instruction with Custom uniformity can be proven uniform based on which operands are ...
LLVM_ABI InstructionCost getMemcpyCost(const Instruction *I) const
LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const
LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const
Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct elem...
LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const
LLVM_ABI Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...
LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const
LLVM_ABI bool canSaveCmp(Loop *L, CondBrInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
Return true if the target can save a compare for loop count, for example hardware loop saves a compar...
LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Some HW prefetchers can handle accesses up to a certain constant stride.
LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const
LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
Return the expected cost of materializing for the given integer immediate of the specified type.
LLVM_ABI unsigned getMinVectorRegisterBitWidth() const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal store.
LLVM_ABI unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
It can be advantageous to detach complex constants from their uses to make their generation cheaper.
LLVM_ABI bool hasArmWideBranch(bool Thumb) const
LLVM_ABI const char * getRegisterClassName(unsigned ClassID) const
LLVM_ABI bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
LLVM_ABI APInt getPriorityMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI BranchProbability getPredictableBranchThreshold() const
If a branch or a select condition is skewed in one direction by more than this factor,...
LLVM_ABI TargetTransformInfo(std::unique_ptr< const TargetTransformInfoImplBase > Impl)
Construct a TTI object using a type implementing the Concept API below.
LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const
LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const
LLVM_ABI unsigned getCacheLineSize() const
LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const
Determine if the target supports unaligned memory accesses.
LLVM_ABI bool shouldCopyAttributeWhenOutliningFrom(const Function *Caller, const Attribute &Attr) const
LLVM_ABI APInt getAddrSpaceCastPreservedPtrMask(unsigned SrcAS, unsigned DstAS) const
Returns a mask indicating which bits of a pointer remain unchanged when casting between address space...
LLVM_ABI int getInlinerVectorBonusPercent() const
LLVM_ABI unsigned getEpilogueVectorizationMinVF() const
LLVM_ABI void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const
Collect kernel launch bounds for F into LB.
PopcntSupportKind
Flags indicating the kind of support for population count.
LLVM_ABI bool preferPredicatedReductionSelect() const
LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) const
Return the expected cost for the given integer when optimising for size.
LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
Return the preferred addressing mode LSR should make efforts to generate.
LLVM_ABI bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
Query the target whether it would be profitable to convert the given loop into a hardware loop.
LLVM_ABI unsigned getInliningThresholdMultiplier() const
LLVM_ABI InstructionCost getBranchMispredictPenalty() const
Returns estimated penalty of a branch misprediction in latency.
LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const
LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
Return true if this is an alternating opcode pattern that can be lowered to a single instruction on t...
LLVM_ABI bool isProfitableToHoist(Instruction *I) const
Return true if it is profitable to hoist instruction in the then/else to before if.
LLVM_ABI bool supportsScalableVectors() const
LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const
Return true if the target supports masked compress store.
LLVM_ABI std::optional< unsigned > getMinPageSize() const
LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const
Indicate that it is potentially unsafe to automatically vectorize floating-point operations because t...
LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const
LLVM_ABI bool shouldBuildRelLookupTables() const
Return true if lookup tables should be turned into relative lookup tables.
LLVM_ABI std::optional< unsigned > getCacheSize(CacheLevel Level) const
LLVM_ABI std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const
Return true if the target has a unified operation to calculate division and remainder.
LLVM_ABI InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Returns the cost estimation for alternating opcode pattern that can be lowered to a single instructio...
TargetCostConstants
Underlying constants for 'cost' values in this interface.
@ TCC_Expensive
The cost of a 'div' instruction on x86.
@ TCC_Free
Expected to fold away in lowering.
@ TCC_Basic
The cost of a typical 'add' instruction.
LLVM_ABI bool enableInterleavedAccessVectorization() const
Enable matching of interleaved access groups.
LLVM_ABI unsigned getMinTripCountTailFoldingThreshold() const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind, std::optional< FastMathFlags > FMF) const
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const
LLVM_ABI bool enableScalableVectorization() const
LLVM_ABI bool useFastCCForInternalCall(Function &F) const
Return true if the input function is internal and should use the fastcc calling convention.
LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
LLVM_ABI bool isNumRegsMajorCostOfLSR() const
Return true if LSR major cost is number of registers.
LLVM_ABI unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const
LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const
LLVM_ABI unsigned getGISelRematGlobalCost() const
LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const
static LLVM_ABI Instruction::CastOps getOpcodeForPartialReductionExtendKind(PartialReductionExtendKind Kind)
Get the cast opcode for an extension kind.
MemIndexedMode
The type of load/store indexing.
LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const
Return true if the target supports masked load.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI bool areInlineCompatible(const Function *Caller, const Function *Callee) const
LLVM_ABI bool useColdCCForColdCall(Function &F) const
Return true if the input function which is cold at all call sites, should use coldcc calling conventi...
LLVM_ABI InstructionCost getFPOpCost(Type *Ty) const
Return the expected cost of supporting the floating point operation of the specified type.
LLVM_ABI bool supportsTailCalls() const
If the target supports tail calls.
LLVM_ABI bool canMacroFuseCmp() const
Return true if the target can fuse a compare and branch.
LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
LLVM_ABI unsigned getNumberOfParts(Type *Tp) const
AddressingModeKind
Which addressing mode Loop Strength Reduction will try to generate.
@ AMK_PostIndexed
Prefer post-indexed addressing mode.
@ AMK_All
Consider all addressing modes.
@ AMK_PreIndexed
Prefer pre-indexed addressing mode.
@ AMK_None
Don't prefer any addressing mode.
LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
LLVM_ABI bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const
Return true if sinking I's operands to the same basic block as I is profitable, e....
LLVM_ABI void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize=std::nullopt) const
LLVM_ABI bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
Query the target whether it would be prefered to create a predicated vector loop, which can avoid the...
LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.scatter intrinsics.
LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdI...
static VectorInstrContext getVectorInstrContextHint(const Instruction *I)
Calculates a VectorInstrContext from I.
LLVM_ABI bool haveFastSqrt(Type *Ty) const
Return true if the hardware has a fast square-root instruction.
LLVM_ABI bool shouldExpandReduction(const IntrinsicInst *II) const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.
ShuffleKind
The various kinds of shuffle patterns for vector queries.
@ SK_InsertSubvector
InsertSubvector. Index indicates start offset.
@ SK_Select
Selects elements from the corresponding lane of either source operand.
@ SK_PermuteSingleSrc
Shuffle elements of single source vector with any shuffle mask.
@ SK_Transpose
Transpose two vectors.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_PermuteTwoSrc
Merge elements from two source vectors into one with any shuffle mask.
@ SK_Reverse
Reverse the order of the vector.
@ SK_ExtractSubvector
ExtractSubvector Index indicates start offset.
LLVM_ABI APInt getFeatureMask(const Function &F) const
Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...
LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE, PeelingPreferences &PP) const
Get target-customized preferences for the generic loop peeling transformation.
LLVM_ABI InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getRegisterClassSpillCost(unsigned ClassID, TargetCostKind CostKind) const
OperandValueKind
Additional information about an operand's possible values.
CacheLevel
The possible cache levels.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Base class of all SIMD vector types.
CallInst * Call
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
@ Length
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:221
LLVM_ABI ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)
Create an analysis pass wrapper around a TTI object.
RecurKind
These are the kinds of recurrences that we support.
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
ValueUniformity
Enum describing how values behave with respect to uniformity and divergence, to answer the question: ...
Definition Uniformity.h:18
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A CRTP mix-in that provides informational APIs needed for analysis passes.
Definition PassManager.h:93
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
Attributes of a target dependent hardware loop.
LLVM_ABI bool canAnalyze(LoopInfo &LI)
LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
Value * PtrVal
This is the pointer that the intrinsic is loading from or storing to.
InterleavedAccessInfo * IAI
TailFoldingInfo(TargetLibraryInfo *TLI, LoopVectorizationLegality *LVL, InterleavedAccessInfo *IAI)
TargetLibraryInfo * TLI
LoopVectorizationLegality * LVL
unsigned Insns
TODO: Some of these could be merged.
Returns options for expansion of memcmp. IsZeroCmp is true when the memcmp result is only compared against zero.
OperandValueInfo mergeWith(const OperandValueInfo OpInfoY)
bool AllowPeeling
Allow peeling off loop iterations.
bool AllowLoopNestsPeeling
Allow peeling off loop iterations for loop nests.
bool PeelLast
Peel off the last PeelCount loop iterations.
bool PeelProfiledIterations
Allow peeling based on profile.
unsigned PeelCount
A forced peeling factor (the number of bodies of the original loop that should be peeled off before t...
Describe known properties for a set of pointers.
unsigned IsKnownStride
True if distance between any two neighbouring pointers is a known value.
unsigned IsUnitStride
These properties only valid if SameBaseAddress is set.
unsigned IsSameBaseAddress
All the GEPs in a set have same base address.
Parameters that control the generic loop unrolling transformation.
unsigned Count
A forced unrolling factor (the number of concatenated bodies of the original loop in the unrolled loo...
bool UpperBound
Allow using trip count upper bound to unroll loops.
unsigned Threshold
The cost threshold for the unrolled loop.
bool Force
Apply loop unroll on any kind of loop (mainly to loops that fail runtime unrolling).
unsigned PartialOptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...
bool UnrollVectorizedLoop
Disable runtime unrolling by default for vectorized loops.
unsigned DefaultUnrollRuntimeCount
Default unroll count for loops with run-time trip count.
unsigned MaxPercentThresholdBoost
If complete unrolling will reduce the cost of the loop, we will boost the Threshold by a certain perc...
bool RuntimeUnrollMultiExit
Allow runtime unrolling multi-exit loops.
unsigned SCEVExpansionBudget
Don't allow runtime unrolling if expanding the trip count takes more than SCEVExpansionBudget.
bool AddAdditionalAccumulators
Allow unrolling to add parallel reduction phis.
unsigned UnrollAndJamInnerLoopThreshold
Threshold for unroll and jam, for inner loop size.
unsigned MaxIterationsCountToAnalyze
Don't allow loop unrolling to simulate more than this number of iterations when checking full unroll ...
bool AllowRemainder
Allow generation of a loop remainder (extra iterations after unroll).
bool UnrollAndJam
Allow unroll and jam. Used to enable unroll and jam for the target.
bool UnrollRemainder
Allow unrolling of all the iterations of the runtime loop remainder.
unsigned FullUnrollMaxCount
Set the maximum unrolling factor for full unrolling.
unsigned PartialThreshold
The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...
bool Runtime
Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...
bool Partial
Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...
unsigned OptSizeThreshold
The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
bool AllowExpensiveTripCount
Allow emitting expensive instructions (such as divisions) when computing the trip count of a loop for...
unsigned MaxUpperBound
Set the maximum upper bound of trip count.
VPLegalization(VPTransform EVLParamStrategy, VPTransform OpStrategy)