LLVM 23.0.0git
TargetLowering.h
Go to the documentation of this file.
1//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file describes how to lower LLVM code to machine code. This has two
11/// main components:
12///
13/// 1. Which ValueTypes are natively supported by the target.
14/// 2. Which operations are supported for supported ValueTypes.
15/// 3. Cost thresholds for alternative implementations of certain operations.
16///
17/// In addition it has a few other components, like information about FP
18/// immediates.
19///
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_CODEGEN_TARGETLOWERING_H
23#define LLVM_CODEGEN_TARGETLOWERING_H
24
25#include "llvm/ADT/APInt.h"
26#include "llvm/ADT/ArrayRef.h"
27#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/StringRef.h"
42#include "llvm/IR/Attributes.h"
43#include "llvm/IR/CallingConv.h"
44#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/InlineAsm.h"
48#include "llvm/IR/Instruction.h"
51#include "llvm/IR/Type.h"
58#include <algorithm>
59#include <cassert>
60#include <climits>
61#include <cstdint>
62#include <map>
63#include <string>
64#include <utility>
65#include <vector>
66
67namespace llvm {
68
69class AssumptionCache;
70class CCState;
71class CCValAssign;
74class Constant;
75class FastISel;
77class GlobalValue;
78class Loop;
80class IntrinsicInst;
81class IRBuilderBase;
82struct KnownBits;
83class LLVMContext;
85class MachineFunction;
86class MachineInstr;
88class MachineLoop;
90class MCContext;
91class MCExpr;
92class Module;
95class TargetMachine;
99class Value;
100class VPIntrinsic;
101
namespace Sched {

/// Scheduling preferences for SelectionDAG instruction scheduling.
// NOTE: the `enum Preference {` head line was dropped from this excerpt and
// has been restored here; the enumerator list below is unchanged.
enum Preference {
  None,            // No preference
  Source,          // Follow source order.
  RegPressure,     // Scheduling for lowest register pressure.
  Hybrid,          // Scheduling for both latency and register pressure.
  ILP,             // Scheduling for ILP in low register pressure mode.
  VLIW,            // Scheduling for VLIW targets.
  Fast,            // Fast suboptimal list scheduling
  Linearize,       // Linearize DAG, no scheduling
  Last = Linearize // Marker for the last Sched::Preference
};

} // end namespace Sched
117
118// MemOp models a memory operation, either memset or memcpy/memmove.
119struct MemOp {
120private:
121 // Shared
122 uint64_t Size;
123 bool DstAlignCanChange; // true if destination alignment can satisfy any
124 // constraint.
125 Align DstAlign; // Specified alignment of the memory operation.
126
127 bool AllowOverlap;
128 // memset only
129 bool IsMemset; // If setthis memory operation is a memset.
130 bool ZeroMemset; // If set clears out memory with zeros.
131 // memcpy only
132 bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
133 // constant so it does not need to be loaded.
134 Align SrcAlign; // Inferred alignment of the source or default value if the
135 // memory operation does not need to load the value.
136public:
137 static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
138 Align SrcAlign, bool IsVolatile,
139 bool MemcpyStrSrc = false) {
140 MemOp Op;
141 Op.Size = Size;
142 Op.DstAlignCanChange = DstAlignCanChange;
143 Op.DstAlign = DstAlign;
144 Op.AllowOverlap = !IsVolatile;
145 Op.IsMemset = false;
146 Op.ZeroMemset = false;
147 Op.MemcpyStrSrc = MemcpyStrSrc;
148 Op.SrcAlign = SrcAlign;
149 return Op;
150 }
151
152 static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
153 bool IsZeroMemset, bool IsVolatile) {
154 MemOp Op;
155 Op.Size = Size;
156 Op.DstAlignCanChange = DstAlignCanChange;
157 Op.DstAlign = DstAlign;
158 Op.AllowOverlap = !IsVolatile;
159 Op.IsMemset = true;
160 Op.ZeroMemset = IsZeroMemset;
161 Op.MemcpyStrSrc = false;
162 return Op;
163 }
164
165 uint64_t size() const { return Size; }
167 assert(!DstAlignCanChange);
168 return DstAlign;
169 }
170 bool isFixedDstAlign() const { return !DstAlignCanChange; }
171 bool allowOverlap() const { return AllowOverlap; }
172 bool isMemset() const { return IsMemset; }
173 bool isMemcpy() const { return !IsMemset; }
175 return isMemcpy() && !DstAlignCanChange;
176 }
177 bool isZeroMemset() const { return isMemset() && ZeroMemset; }
178 bool isMemcpyStrSrc() const {
179 assert(isMemcpy() && "Must be a memcpy");
180 return MemcpyStrSrc;
181 }
183 assert(isMemcpy() && "Must be a memcpy");
184 return SrcAlign;
185 }
186 bool isSrcAligned(Align AlignCheck) const {
187 return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
188 }
189 bool isDstAligned(Align AlignCheck) const {
190 return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
191 }
192 bool isAligned(Align AlignCheck) const {
193 return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
194 }
195};
196
197/// This base class for TargetLowering contains the SelectionDAG-independent
198/// parts that can be used from the rest of CodeGen.
200public:
201 /// This enum indicates whether operations are valid for a target, and if not,
202 /// what action should be used to make them valid.
204 Legal, // The target natively supports this operation.
205 Promote, // This operation should be executed in a larger type.
206 Expand, // Try to expand this to other ops, otherwise use a libcall.
207 LibCall, // Don't try to expand this to other ops, always use a libcall.
208 Custom // Use the LowerOperation hook to implement custom lowering.
209 };
210
211 /// This enum indicates whether a types are legal for a target, and if not,
212 /// what action should be used to make them valid.
214 TypeLegal, // The target natively supports this type.
215 TypePromoteInteger, // Replace this integer with a larger one.
216 TypeExpandInteger, // Split this integer into two of half the size.
217 TypeSoftenFloat, // Convert this float to a same size integer type.
218 TypeExpandFloat, // Split this float into two of half the size.
219 TypeScalarizeVector, // Replace this one-element vector with its element.
220 TypeSplitVector, // Split this vector into two of half the size.
221 TypeWidenVector, // This vector should be widened into a larger vector.
222 TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
223 TypeScalarizeScalableVector, // This action is explicitly left
224 // unimplemented. While it is theoretically
225 // possible to legalize operations on scalable
226 // types with a loop that handles the vscale *
227 // #lanes of the vector, this is non-trivial at
228 // SelectionDAG level and these types are
229 // better to be widened or promoted.
230 };
231
  /// LegalizeKind holds the legalization kind (the action) that needs to
  /// happen to an EVT, paired with the type it should be converted to, in
  /// order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
235
236 /// Enum that describes how the target represents true/false values.
238 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
239 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
240 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
241 };
242
243 /// Enum that describes what type of support for selects the target has.
245 ScalarValSelect, // The target supports scalar selects (ex: cmov).
246 ScalarCondVectorVal, // The target supports selects with a scalar condition
247 // and vector values (ex: cmov).
248 VectorMaskSelect // The target supports vector selects with a vector
249 // mask (ex: x86 blends).
250 };
251
252 /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
253 /// to, if at all. Exists because different targets have different levels of
254 /// support for these atomic instructions, and also have different options
255 /// w.r.t. what they should expand to.
257 None, // Don't expand the instruction.
258 CastToInteger, // Cast the atomic instruction to another type, e.g. from
259 // floating-point to integer type.
260 LLSC, // Expand the instruction into loadlinked/storeconditional; used
261 // by ARM/AArch64/PowerPC.
262 LLOnly, // Expand the (load) instruction into just a load-linked, which has
263 // greater atomic guarantees than a normal load.
264 CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
265 MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
266 BitTestIntrinsic, // Use a target-specific intrinsic for special bit
267 // operations; used by X86.
268 CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
269 // operations; used by X86.
270 Expand, // Generic expansion in terms of other atomic operations.
271 CustomExpand, // Custom target-specific expansion using TLI hooks.
272
273 // Rewrite to a non-atomic form for use in a known non-preemptible
274 // environment.
276 };
277
  /// Enum that specifies when a multiplication should be expanded into a
  /// sequence of simpler operations.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };
284
  /// Enum that specifies when a float negation is beneficial, relative to the
  /// cost of the original (non-negated) expression.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };
291
292 /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
293 /// (setcc ...)).
295 None = 0, // No fold is preferable.
296 AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
297 NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
298 ABS = 4, // Fold with `llvm.abs` op is preferable.
299 };
300
  // NOTE(review): this is the interior of an argument-list entry class
  // (ArgListEntry). The class-head line, the members that the two doc
  // comments below describe, some flag members, and the constructor(s)
  // were dropped from this excerpt — restore them from the full header
  // before editing this region.
  public:
    /// Original unlegalized argument type.
    /// Same as OrigTy, or partially legalized for soft float libcalls.
    // Per-argument ABI flags, mirroring call-site parameter attributes.
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsNoExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    // Explicit alignment of the argument, if any.
    MaybeAlign Alignment = std::nullopt;
    // Pointee type for indirect arguments (e.g. byval/sret); null otherwise.
    Type *IndirectType = nullptr;

    /// Populate the flags above from argument \p ArgIdx of \p Call.
    LLVM_ABI void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;
342
344 switch (Content) {
346 // Extend by adding rubbish bits.
347 return ISD::ANY_EXTEND;
349 // Extend by adding zero bits.
350 return ISD::ZERO_EXTEND;
352 // Extend by copying the sign bit.
353 return ISD::SIGN_EXTEND;
354 }
355 llvm_unreachable("Invalid content kind");
356 }
357
  /// Construct lowering info for the given target machine and subtarget.
  explicit TargetLoweringBase(const TargetMachine &TM,
                              const TargetSubtargetInfo &STI);
  // NOTE(review): the deleted copy operations / virtual destructor declared
  // between the constructor and isStrictFPEnabled() in the full header were
  // dropped from this excerpt.

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  /// Return the target machine this lowering info was constructed for.
  const TargetMachine &getTargetMachine() const { return TM; }

  /// Return true if the target lowers floating point in software by default.
  virtual bool useSoftFloat() const { return false; }
377
378 /// Return the pointer type for the given address space, defaults to
379 /// the pointer type from the data layout.
380 /// FIXME: The default needs to be removed once all the code is updated.
381 virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
382 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
383 }
384
385 /// Return the in-memory pointer type for the given address space, defaults to
386 /// the pointer type from the data layout.
387 /// FIXME: The default needs to be removed once all the code is updated.
388 virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
389 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
390 }
391
392 /// Return the type for frame index, which is determined by
393 /// the alloca address space specified through the data layout.
395 return getPointerTy(DL, DL.getAllocaAddrSpace());
396 }
397
398 /// Return the type for code pointers, which is determined by the program
399 /// address space specified through the data layout.
401 return getPointerTy(DL, DL.getProgramAddressSpace());
402 }
403
404 /// Return the type for operands of fence.
405 /// TODO: Let fence operands be of i32 type and remove this.
406 virtual MVT getFenceOperandTy(const DataLayout &DL) const {
407 return getPointerTy(DL);
408 }
409
410 /// Return the type to use for a scalar shift opcode, given the shifted amount
411 /// type. Targets should return a legal type if the input type is legal.
412 /// Targets can return a type that is too small if the input type is illegal.
413 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
414
415 /// Returns the type for the shift amount of a shift opcode. For vectors,
416 /// returns the input type. For scalars, calls getScalarShiftAmountTy.
417 /// If getScalarShiftAmountTy type cannot represent all possible shift
418 /// amounts, returns MVT::i32.
419 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
420
421 /// Return the preferred type to use for a shift opcode, given the shifted
422 /// amount type is \p ShiftValueTy.
424 virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
425 return ShiftValueTy;
426 }
427
428 /// Returns the type to be used for the index operand vector operations. By
429 /// default we assume it will have the same size as an address space 0
430 /// pointer.
431 virtual unsigned getVectorIdxWidth(const DataLayout &DL) const {
432 return DL.getPointerSizeInBits(0);
433 }
434
435 /// Returns the type to be used for the index operand of:
436 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
437 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
441
442 /// Returns the type to be used for the index operand of:
443 /// G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT,
444 /// G_INSERT_SUBVECTOR, and G_EXTRACT_SUBVECTOR
447 }
448
449 /// Returns the type to be used for the EVL/AVL operand of VP nodes:
450 /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
451 /// and must be at least as large as i32. The EVL is implicitly zero-extended
452 /// to any larger type.
453 virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }
454
  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  // NOTE(review): the getTargetMMOFlags(const Instruction &) overload this
  // comment documents was dropped from the excerpt.

  /// This callback is used to inspect load/store SDNode.
  /// The default implementation does nothing.
  // NOTE(review): the corresponding SDNode overload was also dropped.

  MachineMemOperand::Flags getLoadMemOperandFlags(
      const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC = nullptr,
      const TargetLibraryInfo *LibInfo = nullptr,
  // NOTE(review): the trailing parameter(s) and `) const;` of the declaration
  // above were dropped from the excerpt.
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;
  // NOTE(review): the return-type line of the next declaration was dropped.
  getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const;

  /// Return true if the target supports the given kind of select; true for
  /// everything by default.
  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }
483
  /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
  /// using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  /// Return true if the @llvm.experimental.get.vector.length intrinsic should
  /// be expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandCttzElements(EVT VT) const { return true; }

  /// Return the minimum number of bits required to hold the maximum possible
  /// number of trailing zero vector elements.
  unsigned getBitWidthForCttzElements(EVT RetVT, ElementCount EC,
                                      bool ZeroIsPoison,
                                      const ConstantRange *VScaleRange) const;

  /// Return true if the @llvm.experimental.vector.match intrinsic should be
  /// expanded for vector type `VT' and search size `SearchSize' using generic
  /// code in SelectionDAGBuilder.
  virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const {
    return true;
  }

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Does the target have multiple (allocatable) condition registers that
  /// can be used to store the results of comparisons for use by selects
  /// and conditional branches. With multiple condition registers, the code
  /// generator will not aggressively sink comparisons into the blocks of their
  /// users.
  virtual bool hasMultipleConditionRegisters(EVT VT) const { return false; }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
535
536 /// Return the preferred vector type legalization action.
539 // The default action for one element vectors is to scalarize
541 return TypeScalarizeVector;
542 // The default action for an odd-width vector is to widen.
543 if (!VT.isPow2VectorType())
544 return TypeWidenVector;
545 // The default action for other vectors is to promote
546 return TypePromoteInteger;
547 }
548
549 // Return true if, for soft-promoted half, the half type should be passed to
550 // and returned from functions as f32. The default behavior is to pass as
551 // i16. If soft-promoted half is not used, this function is ignored and
552 // values are always passed and returned as f32.
553 virtual bool useFPRegsForHalfType() const { return false; }
554
555 // There are two general methods for expanding a BUILD_VECTOR node:
556 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
557 // them together.
558 // 2. Build the vector on the stack and then load it.
559 // If this function returns true, then method (1) will be used, subject to
560 // the constraint that all of the necessary shuffles are legal (as determined
561 // by isShuffleMaskLegal). If this function returns false, then method (2) is
562 // always used. The vector type, and the number of defined values, are
563 // provided.
564 virtual bool
566 unsigned DefinedValues) const {
567 return DefinedValues < 3;
568 }
569
570 /// Return true if integer divide is usually cheaper than a sequence of
571 /// several shifts, adds, and multiplies for this target.
572 /// The definition of "cheaper" may depend on whether we're optimizing
573 /// for speed or for size.
574 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
575
576 /// Return true if the target can handle a standalone remainder operation.
577 virtual bool hasStandaloneRem(EVT VT) const {
578 return true;
579 }
580
581 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
582 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
583 // Default behavior is to replace SQRT(X) with X*RSQRT(X).
584 return false;
585 }
586
587 /// Reciprocal estimate status values used by the functions below.
592 };
593
594 /// Return a ReciprocalEstimate enum value for a square root of the given type
595 /// based on the function's attributes. If the operation is not overridden by
596 /// the function's attributes, "Unspecified" is returned and target defaults
597 /// are expected to be used for instruction selection.
598 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
599
600 /// Return a ReciprocalEstimate enum value for a division of the given type
601 /// based on the function's attributes. If the operation is not overridden by
602 /// the function's attributes, "Unspecified" is returned and target defaults
603 /// are expected to be used for instruction selection.
604 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
605
606 /// Return the refinement step count for a square root of the given type based
607 /// on the function's attributes. If the operation is not overridden by
608 /// the function's attributes, "Unspecified" is returned and target defaults
609 /// are expected to be used for instruction selection.
610 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
611
612 /// Return the refinement step count for a division of the given type based
613 /// on the function's attributes. If the operation is not overridden by
614 /// the function's attributes, "Unspecified" is returned and target defaults
615 /// are expected to be used for instruction selection.
616 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
617
618 /// Returns true if target has indicated at least one type should be bypassed.
619 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
620
621 /// Returns map of slow types for division or remainder with corresponding
622 /// fast types
624 return BypassSlowDivWidths;
625 }
626
627 /// Return true if Flow Control is an expensive operation that should be
628 /// avoided.
629 bool isJumpExpensive() const { return JumpIsExpensive; }
630
631 // Costs parameters used by
632 // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
633 // shouldKeepJumpConditionsTogether will use these parameter value to
634 // determine if two conditions in the form `br (and/or cond1, cond2)` should
635 // be split into two branches or left as one.
636 //
637 // BaseCost is the cost threshold (in latency). If the estimated latency of
638 // computing both `cond1` and `cond2` is below the cost of just computing
639 // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
640 // they will be split.
641 //
642 // LikelyBias increases BaseCost if branch probability info indicates that it
643 // is likely that both `cond1` and `cond2` will be computed.
644 //
645 // UnlikelyBias decreases BaseCost if branch probability info indicates that
646 // it is likely that both `cond1` and `cond2` will be computed.
647 //
648 // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
649 // `shouldKeepJumpConditionsTogether` always returning false).
655 // Return params for deciding if we should keep two branch conditions merged
656 // or split them into two separate branches.
657 // Arg0: The binary op joining the two conditions (and/or).
658 // Arg1: The first condition (cond1)
659 // Arg2: The second condition (cond2)
660 virtual CondMergingParams
662 const Value *) const {
663 // -1 will always result in splitting.
664 return {-1, -1, -1};
665 }
666
667 /// Return true if selects are only cheaper than branches if the branch is
668 /// unlikely to be predicted right.
672
673 virtual bool fallBackToDAGISel(const Instruction &Inst) const {
674 return false;
675 }
676
  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient, however, this can be undone by optimizations in
  /// dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of vector
  /// constant with the given size and type for the address space than to
  /// store the individual scalar element constants. By default only an
  /// all-zeros constant is considered cheap.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }
717
  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if ctpop instruction is fast. Defaults to whether CTPOP is
  /// legal for the type.
  virtual bool isCtpopFast(EVT VT) const {
    return isOperationLegal(ISD::CTPOP, VT);
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in code
  /// gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }
757
758 /// Return if the target supports combining a
759 /// chain like:
760 /// \code
761 /// %andResult = and %val1, #mask
762 /// %icmpResult = icmp %andResult, 0
763 /// \endcode
764 /// into a single machine instruction of a form like:
765 /// \code
766 /// cc = test %register, #mask
767 /// \endcode
768 virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
769 return false;
770 }
771
772 /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
773 virtual bool
775 const MemSDNode &NodeY) const {
776 return true;
777 }
778
779 /// Use bitwise logic to make pairs of compares more efficient. For example:
780 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
781 /// This should be true when it takes more than one instruction to lower
782 /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
783 /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
784 virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
785 return false;
786 }
787
788 /// Return the preferred operand type if the target has a quick way to compare
789 /// integer values of the given size. Assume that any legal integer type can
790 /// be compared efficiently. Targets may override this to allow illegal wide
791 /// types to return a vector type if there is support to compare that type.
792 virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
793 MVT VT = MVT::getIntegerVT(NumBits);
795 }
796
  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  /// (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }
827
828 /// There are two ways to clear extreme bits (either low or high):
829 /// Mask: x & (-1 << y) (the instcombine canonical form)
830 /// Shifts: x >> y << y
831 /// Return true if the variant with 2 variable shifts is preferred.
832 /// Return false if there is no preference.
834 // By default, let's assume that no one prefers shifts.
835 return false;
836 }
837
838 /// Return true if it is profitable to fold a pair of shifts into a mask.
839 /// This is usually true on most targets. But some targets, like Thumb1,
840 /// have immediate shift instructions, but no immediate "and" instruction;
841 /// this makes the fold unprofitable.
842 virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N) const {
843 return true;
844 }
845
846 /// Should we tranform the IR-optimal check for whether given truncation
847 /// down into KeptBits would be truncating or not:
848 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
849 /// Into it's more traditional form:
850 /// ((%x << C) a>> C) dstcond %x
851 /// Return true if we should transform.
852 /// Return false if there is no preference.
854 unsigned KeptBits) const {
855 // By default, let's assume that no one prefers shifts.
856 return false;
857 }
858
  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  // NOTE(review): the declarator line(s) of this virtual hook — its name and
  // the leading parameters (the XC/CC constants and the X/Y values used in the
  // body) — were dropped from this excerpt; restore them from the full header
  // before editing.
                                     unsigned OldShiftOpcode, unsigned NewShiftOpcode,
                                     SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }
890
891 // Return true if its desirable to perform the following transform:
892 // (fmul C, (uitofp Pow2))
893 // -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
894 // (fdiv C, (uitofp Pow2))
895 // -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
896 //
897 // This is only queried after we have verified the transform will be bitwise
898 // equals.
899 //
900 // SDNode *N : The FDiv/FMul node we want to transform.
901 // SDValue FPConst: The Float constant operand in `N`.
902 // SDValue IntPow2: The Integer power of 2 operand in `N`.
904 SDValue IntPow2) const {
905 // Default to avoiding fdiv which is often very expensive.
906 return N->getOpcode() == ISD::FDIV;
907 }
908
909 // Given:
910 // (icmp eq/ne (and X, C0), (shift X, C1))
911 // or
912 // (icmp eq/ne X, (rotate X, CPow2))
913
914 // If C0 is a mask or shifted mask and the shift amt (C1) isolates the
915 // remaining bits (i.e something like `(x64 & UINT32_MAX) == (x64 >> 32)`)
916 // Do we prefer the shift to be shift-right, shift-left, or rotate.
917 // Note: Its only valid to convert the rotate version to the shift version iff
918 // the shift-amt (`C1`) is a power of 2 (including 0).
919 // If ShiftOpc (current Opcode) is returned, do nothing.
921 EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
922 const APInt &ShiftOrRotateAmt,
923 const std::optional<APInt> &AndMask) const {
924 return ShiftOpc;
925 }
926
927 /// These two forms are equivalent:
928 /// sub %y, (xor %x, -1)
929 /// add (add %x, 1), %y
930 /// The variant with two add's is IR-canonical.
931 /// Some targets may prefer one to the other.
932 virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
933 // By default, let's assume that everyone prefers the form with two add's.
934 return true;
935 }
936
937 // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
938 // may want to avoid this to prevent loss of sub_nsw pattern.
939 virtual bool preferABDSToABSWithNSW(EVT VT) const {
940 return true;
941 }
942
943 // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
944 virtual bool preferScalarizeSplat(SDNode *N) const { return true; }
945
946 // Return true if the target wants to transform:
947 // (TruncVT truncate(sext_in_reg(VT X, ExtVT))
948 // -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
949 // Some targets might prefer pre-sextinreg to improve truncation/saturation.
950 virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
951 return true;
952 }
953
954 /// Return true if the target wants to use the optimization that
955 /// turns ext(promotableInst1(...(promotableInstN(load)))) into
956 /// promotedInst1(...(promotedInstN(ext(load)))).
958
959 /// Return true if the target can combine store(extractelement VectorTy,
960 /// Idx).
961 /// \p Cost[out] gives the cost of that transformation when this is true.
962 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
963 unsigned &Cost) const {
964 return false;
965 }
966
967 /// Return true if the target shall perform extract vector element and store
968 /// given that the vector is known to be splat of constant.
969 /// \p Index[out] gives the index of the vector element to be extracted when
970 /// this is true.
972 Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
973 return false;
974 }
975
976 /// Return true if inserting a scalar into a variable element of an undef
977 /// vector is more efficiently handled by splatting the scalar instead.
978 virtual bool shouldSplatInsEltVarIndex(EVT) const {
979 return false;
980 }
981
982 /// Return true if target always benefits from combining into FMA for a
983 /// given value type. This must typically return false on targets where FMA
984 /// takes more cycles to execute than FADD.
985 virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }
986
987 /// Return true if target always benefits from combining into FMA for a
988 /// given value type. This must typically return false on targets where FMA
989 /// takes more cycles to execute than FADD.
990 virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }
991
992 /// Return the ValueType of the result of SETCC operations.
993 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
994 EVT VT) const;
995
996 /// Return the ValueType for comparison libcalls. Comparison libcalls include
997 /// floating point comparison calls, and Ordered/Unordered check calls on
998 /// floating point numbers.
1000 return MVT::i32; // return the default value
1001 }
1002
1003 /// For targets without i1 registers, this gives the nature of the high-bits
1004 /// of boolean values held in types wider than i1.
1005 ///
1006 /// "Boolean values" are special true/false values produced by nodes like
1007 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
1008 /// Not to be confused with general values promoted from i1. Some cpus
1009 /// distinguish between vectors of boolean and scalars; the isVec parameter
1010 /// selects between the two kinds. For example on X86 a scalar boolean should
1011 /// be zero extended from i1, while the elements of a vector of booleans
1012 /// should be sign extended from i1.
1013 ///
1014 /// Some cpus also treat floating point types the same way as they treat
1015 /// vectors instead of the way they treat scalars.
1016 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
1017 if (isVec)
1018 return BooleanVectorContents;
1019 return isFloat ? BooleanFloatContents : BooleanContents;
1020 }
1021
1023 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
1024 }
1025
1026 /// Promote the given target boolean to a target boolean of the given type.
1027 /// A target boolean is an integer value, not necessarily of type i1, the bits
1028 /// of which conform to getBooleanContents.
1029 ///
1030 /// ValVT is the type of values that produced the boolean.
1032 EVT ValVT) const {
1033 SDLoc dl(Bool);
1034 EVT BoolVT =
1035 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
1037 return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
1038 }
1039
1040 /// Return target scheduling preference.
1042 return SchedPreferenceInfo;
1043 }
1044
1045 /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
1046 /// for different nodes. This function returns the preference (or none) for
1047 /// the given node.
1049 return Sched::None;
1050 }
1051
1052 /// Return the register class that should be used for the specified value
1053 /// type.
1054 virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
1055 (void)isDivergent;
1056 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1057 assert(RC && "This value type is not natively supported!");
1058 return RC;
1059 }
1060
1061 /// Allows target to decide about the register class of the
1062 /// specific value that is live outside the defining block.
1063 /// Returns true if the value needs uniform register class.
1065 const Value *) const {
1066 return false;
1067 }
1068
1069 /// Return the 'representative' register class for the specified value
1070 /// type.
1071 ///
1072 /// The 'representative' register class is the largest legal super-reg
1073 /// register class for the register class of the value type. For example, on
1074 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
1075 /// register class is GR64 on x86_64.
1076 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
1077 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
1078 return RC;
1079 }
1080
1081 /// Return the cost of the 'representative' register class for the specified
1082 /// value type.
1084 return RepRegClassCostForVT[VT.SimpleTy];
1085 }
1086
1087 /// Return the preferred strategy to legalize tihs SHIFT instruction, with
1088 /// \p ExpansionFactor being the recursion depth - how many expansion needed.
1094 virtual ShiftLegalizationStrategy
1096 unsigned ExpansionFactor) const {
1097 if (ExpansionFactor == 1)
1100 }
1101
1102 /// Return true if the target has native support for the specified value type.
1103 /// This means that it has a register that directly holds it without
1104 /// promotions or expansions.
1105 bool isTypeLegal(EVT VT) const {
1106 assert(!VT.isSimple() ||
1107 (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
1108 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
1109 }
1110
1112 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
1113 /// that indicates how instruction selection should deal with the type.
1114 LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];
1115
1116 public:
1117 ValueTypeActionImpl() { llvm::fill(ValueTypeActions, TypeLegal); }
1118
1120 return ValueTypeActions[VT.SimpleTy];
1121 }
1122
1124 ValueTypeActions[VT.SimpleTy] = Action;
1125 }
1126 };
1127
1129 return ValueTypeActions;
1130 }
1131
1132 /// Return pair that represents the legalization kind (first) that needs to
1133 /// happen to EVT (second) in order to type-legalize it.
1134 ///
1135 /// First: how we should legalize values of this type, either it is already
1136 /// legal (return 'Legal') or we need to promote it to a larger type (return
1137 /// 'Promote'), or we need to expand it into multiple registers of smaller
1138 /// integer type (return 'Expand'). 'Custom' is not an option.
1139 ///
1140 /// Second: for types supported by the target, this is an identity function.
1141 /// For types that must be promoted to larger types, this returns the larger
1142 /// type to promote to. For integer types that are larger than the largest
1143 /// integer register, this contains one step in the expansion to get to the
1144 /// smaller register. For illegal floating point types, this returns the
1145 /// integer type to transform to.
1146 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
1147
1148 /// Return how we should legalize values of this type, either it is already
1149 /// legal (return 'Legal') or we need to promote it to a larger type (return
1150 /// 'Promote'), or we need to expand it into multiple registers of smaller
1151 /// integer type (return 'Expand'). 'Custom' is not an option.
1153 return getTypeConversion(Context, VT).first;
1154 }
1156 return ValueTypeActions.getTypeAction(VT);
1157 }
1158
1159 /// For types supported by the target, this is an identity function. For
1160 /// types that must be promoted to larger types, this returns the larger type
1161 /// to promote to. For integer types that are larger than the largest integer
1162 /// register, this contains one step in the expansion to get to the smaller
1163 /// register. For illegal floating point types, this returns the integer type
1164 /// to transform to.
1165 virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
1166 return getTypeConversion(Context, VT).second;
1167 }
1168
1169 /// Perform getTypeToTransformTo repeatedly until a legal type is obtained.
1170 /// Useful for vector operations that might take multiple steps to legalize.
1172 EVT LegalVT = getTypeToTransformTo(Context, VT);
1173 while (LegalVT != VT) {
1174 VT = LegalVT;
1175 LegalVT = getTypeToTransformTo(Context, VT);
1176 }
1177 return LegalVT;
1178 }
1179
1180 /// For types supported by the target, this is an identity function. For
1181 /// types that must be expanded (i.e. integer types that are larger than the
1182 /// largest integer register or illegal floating point types), this returns
1183 /// the largest legal type it will be expanded to.
1184 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
1185 assert(!VT.isVector());
1186 while (true) {
1187 switch (getTypeAction(Context, VT)) {
1188 case TypeLegal:
1189 return VT;
1190 case TypeExpandInteger:
1191 VT = getTypeToTransformTo(Context, VT);
1192 break;
1193 default:
1194 llvm_unreachable("Type is not legal nor is it to be expanded!");
1195 }
1196 }
1197 }
1198
1199 /// Vector types are broken down into some number of legal first class types.
1200 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
1201 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
1202 /// turns into 4 EVT::i32 values with both PPC and X86.
1203 ///
1204 /// This method returns the number of registers needed, and the VT for each
1205 /// register. It also returns the VT and quantity of the intermediate values
1206 /// before they are promoted/expanded.
1207 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1208 EVT &IntermediateVT,
1209 unsigned &NumIntermediates,
1210 MVT &RegisterVT) const;
1211
1212 /// Certain targets such as MIPS require that some types such as vectors are
1213 /// always broken down into scalars in some contexts. This occurs even if the
1214 /// vector type is legal.
1216 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
1217 unsigned &NumIntermediates, MVT &RegisterVT) const {
1218 return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
1219 RegisterVT);
1220 }
1221
1223 unsigned opc = 0; // target opcode
1224 EVT memVT; // memory VT
1225
1226 // value representing memory location
1228
1229 // Fallback address space for use if ptrVal is nullptr. std::nullopt means
1230 // unknown address space.
1231 std::optional<unsigned> fallbackAddressSpace;
1232
1233 int offset = 0; // offset off of ptrVal
1234 uint64_t size = 0; // the size of the memory location
1235 // (taken from memVT if zero)
1236 MaybeAlign align = Align(1); // alignment
1237
1242 IntrinsicInfo() = default;
1243 };
1244
1245 /// Given an intrinsic, checks if on the target the intrinsic will need to map
1246 /// to a MemIntrinsicNode (touches memory). If this is the case, it stores
1247 /// the intrinsic information into the IntrinsicInfo vector passed to the
1248 /// function. The vector may contain multiple entries for intrinsics that
1249 /// access multiple memory locations.
1251 const CallBase &I, MachineFunction &MF,
1252 unsigned Intrinsic) const {}
1253
1254 /// Returns true if the target can instruction select the specified FP
1255 /// immediate natively. If false, the legalizer will materialize the FP
1256 /// immediate as a load from a constant pool.
1257 virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
1258 bool ForCodeSize = false) const {
1259 return false;
1260 }
1261
1262 /// Targets can use this to indicate that they only support *some*
1263 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
1264 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
1265 /// legal.
1266 virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
1267 return true;
1268 }
1269
1270 /// Returns true if the operation can trap for the value type.
1271 ///
1272 /// VT must be a legal type. By default, we optimistically assume most
1273 /// operations don't trap except for integer divide and remainder.
1274 virtual bool canOpTrap(unsigned Op, EVT VT) const;
1275
1276 /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
1277 /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
1278 /// constant pool entry.
1280 EVT /*VT*/) const {
1281 return false;
1282 }
1283
1284 /// How to legalize this custom operation?
1286 return Legal;
1287 }
1288
1289 /// Return how this operation should be treated: either it is legal, needs to
1290 /// be promoted to a larger size, needs to be expanded to some other code
1291 /// sequence, or the target has a custom expander for it.
1293 // If a target-specific SDNode requires legalization, require the target
1294 // to provide custom legalization for it.
1295 if (Op >= std::size(OpActions[0]))
1296 return Custom;
1297 if (VT.isExtended())
1298 return Expand;
1299 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
1300 }
1301
1302 /// Custom method defined by each target to indicate if an operation which
1303 /// may require a scale is supported natively by the target.
1304 /// If not, the operation is illegal.
1305 virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
1306 unsigned Scale) const {
1307 return false;
1308 }
1309
1310 /// Some fixed point operations may be natively supported by the target but
1311 /// only for specific scales. This method allows for checking
1312 /// if the width is supported by the target for a given operation that may
1313 /// depend on scale.
1315 unsigned Scale) const {
1316 auto Action = getOperationAction(Op, VT);
1317 if (Action != Legal)
1318 return Action;
1319
1320 // This operation is supported in this type but may only work on specific
1321 // scales.
1322 bool Supported;
1323 switch (Op) {
1324 default:
1325 llvm_unreachable("Unexpected fixed point operation.");
1326 case ISD::SMULFIX:
1327 case ISD::SMULFIXSAT:
1328 case ISD::UMULFIX:
1329 case ISD::UMULFIXSAT:
1330 case ISD::SDIVFIX:
1331 case ISD::SDIVFIXSAT:
1332 case ISD::UDIVFIX:
1333 case ISD::UDIVFIXSAT:
1334 Supported = isSupportedFixedPointOperation(Op, VT, Scale);
1335 break;
1336 }
1337
1338 return Supported ? Action : Expand;
1339 }
1340
1341 // If Op is a strict floating-point operation, return the result
1342 // of getOperationAction for the equivalent non-strict operation.
1344 unsigned EqOpc;
1345 switch (Op) {
1346 default: llvm_unreachable("Unexpected FP pseudo-opcode");
1347#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1348 case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
1349#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1350 case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
1351#include "llvm/IR/ConstrainedOps.def"
1352 }
1353
1354 return getOperationAction(EqOpc, VT);
1355 }
1356
1357 /// Return true if the specified operation is legal on this target or can be
1358 /// made legal with custom lowering. This is used to help guide high-level
1359 /// lowering decisions. LegalOnly is an optional convenience for code paths
1360 /// traversed pre and post legalisation.
1362 bool LegalOnly = false) const {
1363 if (LegalOnly)
1364 return isOperationLegal(Op, VT);
1365
1366 return (VT == MVT::Other || isTypeLegal(VT)) &&
1367 (getOperationAction(Op, VT) == Legal ||
1368 getOperationAction(Op, VT) == Custom);
1369 }
1370
1371 /// Return true if the specified operation is legal on this target or can be
1372 /// made legal using promotion. This is used to help guide high-level lowering
1373 /// decisions. LegalOnly is an optional convenience for code paths traversed
1374 /// pre and post legalisation.
1376 bool LegalOnly = false) const {
1377 if (LegalOnly)
1378 return isOperationLegal(Op, VT);
1379
1380 return (VT == MVT::Other || isTypeLegal(VT)) &&
1381 (getOperationAction(Op, VT) == Legal ||
1382 getOperationAction(Op, VT) == Promote);
1383 }
1384
1385 /// Return true if the specified operation is legal on this target or can be
1386 /// made legal with custom lowering or using promotion. This is used to help
1387 /// guide high-level lowering decisions. LegalOnly is an optional convenience
1388 /// for code paths traversed pre and post legalisation.
1390 bool LegalOnly = false) const {
1391 if (LegalOnly)
1392 return isOperationLegal(Op, VT);
1393
1394 return (VT == MVT::Other || isTypeLegal(VT)) &&
1395 (getOperationAction(Op, VT) == Legal ||
1396 getOperationAction(Op, VT) == Custom ||
1397 getOperationAction(Op, VT) == Promote);
1398 }
1399
1400 /// Return true if the operation uses custom lowering, regardless of whether
1401 /// the type is legal or not.
1402 bool isOperationCustom(unsigned Op, EVT VT) const {
1403 return getOperationAction(Op, VT) == Custom;
1404 }
1405
1406 /// Return true if lowering to a jump table is allowed.
1407 virtual bool areJTsAllowed(const Function *Fn) const {
1408 if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
1409 return false;
1410
1411 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1413 }
1414
1415 /// Check whether the range [Low,High] fits in a machine word.
1416 bool rangeFitsInWord(const APInt &Low, const APInt &High,
1417 const DataLayout &DL) const {
1418 // FIXME: Using the pointer type doesn't seem ideal.
1419 uint64_t BW = DL.getIndexSizeInBits(0u);
1420 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
1421 return Range <= BW;
1422 }
1423
1424 /// Return true if lowering to a jump table is suitable for a set of case
1425 /// clusters which may contain \p NumCases cases, \p Range range of values.
1426 virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
1428 BlockFrequencyInfo *BFI) const;
1429
1430 /// Returns preferred type for switch condition.
1431 virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
1432 EVT ConditionVT) const;
1433
1434 /// Return true if lowering to a bit test is suitable for a set of case
1435 /// clusters which contains \p NumDests unique destinations, \p Low and
1436 /// \p High as its lowest and highest case values, and expects \p NumCmps
1437 /// case value comparisons. Check if the number of destinations, comparison
1438 /// metric, and range are all suitable.
1441 const APInt &Low, const APInt &High, const DataLayout &DL) const {
1442 // FIXME: I don't think NumCmps is the correct metric: a single case and a
1443 // range of cases both require only one branch to lower. Just looking at the
1444 // number of clusters and destinations should be enough to decide whether to
1445 // build bit tests.
1446
1447 // To lower a range with bit tests, the range must fit the bitwidth of a
1448 // machine word.
1449 if (!rangeFitsInWord(Low, High, DL))
1450 return false;
1451
1452 unsigned NumDests = DestCmps.size();
1453 unsigned NumCmps = 0;
1454 unsigned int MaxBitTestEntry = 0;
1455 for (auto &DestCmp : DestCmps) {
1456 NumCmps += DestCmp.second;
1457 if (DestCmp.second > MaxBitTestEntry)
1458 MaxBitTestEntry = DestCmp.second;
1459 }
1460
1461 // Comparisons might be cheaper for small number of comparisons, which can
1462 // be Arch Target specific.
1463 if (MaxBitTestEntry < getMinimumBitTestCmps())
1464 return false;
1465
1466 // Decide whether it's profitable to lower this range with bit tests. Each
1467 // destination requires a bit test and branch, and there is an overall range
1468 // check branch. For a small number of clusters, separate comparisons might
1469 // be cheaper, and for many destinations, splitting the range might be
1470 // better.
1471 return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1472 (NumDests == 3 && NumCmps >= 6);
1473 }
1474
1475 /// Return true if the specified operation is illegal on this target or
1476 /// unlikely to be made legal with custom lowering. This is used to help guide
1477 /// high-level lowering decisions.
1478 bool isOperationExpand(unsigned Op, EVT VT) const {
1479 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
1480 }
1481
1482 /// Return true if the specified operation is legal on this target.
1483 bool isOperationLegal(unsigned Op, EVT VT) const {
1484 return (VT == MVT::Other || isTypeLegal(VT)) &&
1485 getOperationAction(Op, VT) == Legal;
1486 }
1487
1488 bool isOperationExpandOrLibCall(unsigned Op, EVT VT) const {
1489 return isOperationExpand(Op, VT) || getOperationAction(Op, VT) == LibCall;
1490 }
1491
1492 /// Returns an alternative action to use when the coarser lookups (configured
1493 /// through `setLoadExtAction` and `setAtomicLoadExtAction`) yield
1494 /// `LegalizeAction::Custom`. Allows targets to use builtin behaviors (e.g.
1495 /// Legal, Promote) specialized by Alignment and AddrSpace, rather than just
1496 /// types.
1497 virtual LegalizeAction
1498 getCustomLoadAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace,
1499 unsigned ExtType, bool Atomic) const {
1501 }
1502
1503 /// Return how this load with extension should be treated: either it is legal,
1504 /// needs to be promoted to a larger size, needs to be expanded to some other
1505 /// code sequence, or the target has a custom expander for it.
1506 LegalizeAction getLoadAction(EVT ValVT, EVT MemVT, Align Alignment,
1507 unsigned AddrSpace, unsigned ExtType,
1508 bool Atomic) const {
1509 if (ValVT.isExtended() || MemVT.isExtended())
1510 return Expand;
1511 unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
1512 unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
1514 MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
1515 unsigned Shift = 4 * ExtType;
1516
1517 LegalizeAction Action;
1518 if (Atomic) {
1519 Action =
1520 (LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
1521 assert((Action == Legal || Action == Expand) &&
1522 "Unsupported atomic load extension action.");
1523 } else {
1524 Action = (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
1525 }
1526
1527 if (Action == LegalizeAction::Custom) {
1528 return getCustomLoadAction(ValVT, MemVT, Alignment, AddrSpace, ExtType,
1529 Atomic);
1530 }
1531
1532 return Action;
1533 }
1534
1535 /// Return true if the specified load with extension is legal on this target.
1536 bool isLoadLegal(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace,
1537 unsigned ExtType, bool Atomic) const {
1538 return getLoadAction(ValVT, MemVT, Alignment, AddrSpace, ExtType, Atomic) ==
1539 Legal;
1540 }
1541
1542 /// Return true if the specified load with extension is legal or custom
1543 /// on this target.
1544 bool isLoadLegalOrCustom(EVT ValVT, EVT MemVT, Align Alignment,
1545 unsigned AddrSpace, unsigned ExtType,
1546 bool Atomic) const {
1547 LegalizeAction Action =
1548 getLoadAction(ValVT, MemVT, Alignment, AddrSpace, ExtType, Atomic);
1549 return Action == Legal || Action == Custom;
1550 }
1551
1552 /// Returns an alternative action to use when the coarser lookups (configured
1553 /// through `setTruncStoreAction` yield
1554 /// `LegalizeAction::Custom`. Allows targets to use builtin behaviors (e.g.
1555 /// Legal, Promote) specialized by Alignment and AddrSpace, rather than just
1556 /// types.
1558 Align Alignment,
1559 unsigned AddrSpace) const {
1561 }
1562
1563 /// Return how this store with truncation should be treated: either it is
1564 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1565 /// other code sequence, or the target has a custom expander for it.
1567 unsigned AddrSpace) const {
1568 if (ValVT.isExtended() || MemVT.isExtended())
1569 return Expand;
1570 unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
1571 unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
1573 "Table isn't big enough!");
1574
1575 LegalizeAction Action = TruncStoreActions[ValI][MemI];
1576
1577 if (Action == LegalizeAction::Custom) {
1578 return getCustomTruncStoreAction(ValVT, MemVT, Alignment, AddrSpace);
1579 }
1580
1581 return Action;
1582 }
1583
1584 /// Return true if the specified store with truncation is legal on this
1585 /// target.
1586 bool isTruncStoreLegal(EVT ValVT, EVT MemVT, Align Alignment,
1587 unsigned AddrSpace) const {
1588 return isTypeLegal(ValVT) &&
1589 getTruncStoreAction(ValVT, MemVT, Alignment, AddrSpace) == Legal;
1590 }
1591
1592 /// Return true if the specified store with truncation has solution on this
1593 /// target.
1594 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT, Align Alignment,
1595 unsigned AddrSpace) const {
1596 if (!isTypeLegal(ValVT))
1597 return false;
1598
1599 LegalizeAction Action =
1600 getTruncStoreAction(ValVT, MemVT, Alignment, AddrSpace);
1601 return (Action == Legal || Action == Custom);
1602 }
1603
1604 virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT, Align Alignment,
1605 unsigned AddrSpace, bool LegalOnly) const {
1606 if (LegalOnly)
1607 return isTruncStoreLegal(ValVT, MemVT, Alignment, AddrSpace);
1608
1609 return isTruncStoreLegalOrCustom(ValVT, MemVT, Alignment, AddrSpace);
1610 }
1611
1612 /// Return how the indexed load should be treated: either it is legal, needs
1613 /// to be promoted to a larger size, needs to be expanded to some other code
1614 /// sequence, or the target has a custom expander for it.
1615 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1616 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1617 }
1618
1619 /// Return true if the specified indexed load is legal on this target.
1620 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1621 return VT.isSimple() &&
1622 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1623 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1624 }
1625
1626 /// Return how the indexed store should be treated: either it is legal, needs
1627 /// to be promoted to a larger size, needs to be expanded to some other code
1628 /// sequence, or the target has a custom expander for it.
1629 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1630 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1631 }
1632
1633 /// Return true if the specified indexed load is legal on this target.
1634 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1635 return VT.isSimple() &&
1636 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1637 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1638 }
1639
1640 /// Return how the indexed load should be treated: either it is legal, needs
1641 /// to be promoted to a larger size, needs to be expanded to some other code
1642 /// sequence, or the target has a custom expander for it.
1643 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1644 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1645 }
1646
1647 /// Return true if the specified indexed load is legal on this target.
1648 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1649 return VT.isSimple() &&
1650 (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1652 }
1653
1654 /// Return how the indexed store should be treated: either it is legal, needs
1655 /// to be promoted to a larger size, needs to be expanded to some other code
1656 /// sequence, or the target has a custom expander for it.
1657 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1658 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1659 }
1660
1661 /// Return true if the specified indexed load is legal on this target.
1662 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1663 return VT.isSimple() &&
1664 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1666 }
1667
1668 /// Returns true if the index type for a masked gather/scatter requires
1669 /// extending
1670 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1671
1672 // Returns true if Extend can be folded into the index of a masked gathers/scatters
1673 // on this target.
1674 virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const {
1675 return false;
1676 }
1677
1678 // Return true if the target supports a scatter/gather instruction with
1679 // indices which are scaled by the particular value. Note that all targets
1680 // must by definition support scale of 1.
1682 uint64_t ElemSize) const {
1683 // MGATHER/MSCATTER are only required to support scaling by one or by the
1684 // element size.
1685 if (Scale != ElemSize && Scale != 1)
1686 return false;
1687 return true;
1688 }
1689
1690 /// Return how the condition code should be treated: either it is legal, needs
1691 /// to be expanded to some other code sequence, or the target has a custom
1692 /// expander for it.
1695 assert((unsigned)CC < std::size(CondCodeActions) &&
1696 ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1697 "Table isn't big enough!");
1698 // See setCondCodeAction for how this is encoded.
1699 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1700 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1701 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1702 assert(Action != Promote && "Can't promote condition code!");
1703 return Action;
1704 }
1705
1706 /// Return true if the specified condition code is legal for a comparison of
1707 /// the specified types on this target.
1708 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1709 return getCondCodeAction(CC, VT) == Legal;
1710 }
1711
1712 /// Return true if the specified condition code is legal or custom for a
1713 /// comparison of the specified types on this target.
1715 return getCondCodeAction(CC, VT) == Legal ||
1716 getCondCodeAction(CC, VT) == Custom;
1717 }
1718
1719 /// Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type
1720 /// InputVT should be treated. Either it's legal, needs to be promoted to a
1721 /// larger size, needs to be expanded to some other code sequence, or the
1722 /// target has a custom expander for it.
1724 EVT InputVT) const {
1727 PartialReduceActionTypes Key = {Opc, AccVT.getSimpleVT().SimpleTy,
1728 InputVT.getSimpleVT().SimpleTy};
1729 auto It = PartialReduceMLAActions.find(Key);
1730 return It != PartialReduceMLAActions.end() ? It->second : Expand;
1731 }
1732
1733 /// Return true if a PARTIAL_REDUCE_U/SMLA node with the specified types is
1734 /// legal or custom for this target.
1736 EVT InputVT) const {
1737 LegalizeAction Action = getPartialReduceMLAAction(Opc, AccVT, InputVT);
1738 return Action == Legal || Action == Custom;
1739 }
1740
1741 /// If the action for this operation is to promote, this method returns the
1742 /// ValueType to promote to.
1743 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1745 "This operation isn't promoted!");
1746
1747 // See if this has an explicit type specified.
1748 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1750 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1751 if (PTTI != PromoteToType.end()) return PTTI->second;
1752
1753 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1754 "Cannot autopromote this type, add it with AddPromotedToType.");
1755
1756 uint64_t VTBits = VT.getScalarSizeInBits();
1757 MVT NVT = VT;
1758 do {
1759 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1760 assert(NVT.isInteger() == VT.isInteger() &&
1761 NVT.isFloatingPoint() == VT.isFloatingPoint() &&
1762 "Didn't find type to promote to!");
1763 } while (VTBits >= NVT.getScalarSizeInBits() || !isTypeLegal(NVT) ||
1764 getOperationAction(Op, NVT) == Promote);
1765 return NVT;
1766 }
1767
1769 bool AllowUnknown = false) const {
1770 return getValueType(DL, Ty, AllowUnknown);
1771 }
1772
1773 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1774 /// operations except for the pointer size. If AllowUnknown is true, this
1775 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1776 /// otherwise it will assert.
1778 bool AllowUnknown = false) const {
1779 // Lower scalar pointers to native pointer types.
1780 if (auto *PTy = dyn_cast<PointerType>(Ty))
1781 return getPointerTy(DL, PTy->getAddressSpace());
1782
1783 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1784 Type *EltTy = VTy->getElementType();
1785 // Lower vectors of pointers to native pointer types.
1786 EVT EltVT;
1787 if (auto *PTy = dyn_cast<PointerType>(EltTy))
1788 EltVT = getPointerTy(DL, PTy->getAddressSpace());
1789 else
1790 EltVT = EVT::getEVT(EltTy, false);
1791 return EVT::getVectorVT(Ty->getContext(), EltVT, VTy->getElementCount());
1792 }
1793
1794 return EVT::getEVT(Ty, AllowUnknown);
1795 }
1796
1798 bool AllowUnknown = false) const {
1799 // Lower scalar pointers to native pointer types.
1800 if (auto *PTy = dyn_cast<PointerType>(Ty))
1801 return getPointerMemTy(DL, PTy->getAddressSpace());
1802
1803 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1804 Type *EltTy = VTy->getElementType();
1805 EVT EltVT;
1806 if (auto *PTy = dyn_cast<PointerType>(EltTy))
1807 EltVT = getPointerMemTy(DL, PTy->getAddressSpace());
1808 else
1809 EltVT = EVT::getEVT(EltTy, false);
1810 return EVT::getVectorVT(Ty->getContext(), EltVT, VTy->getElementCount());
1811 }
1812
1813 return getValueType(DL, Ty, AllowUnknown);
1814 }
1815
1816
1817 /// Return the MVT corresponding to this LLVM type. See getValueType.
1819 bool AllowUnknown = false) const {
1820 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1821 }
1822
1823 /// Returns the desired alignment for ByVal or InAlloca aggregate function
1824 /// arguments in the caller parameter area.
// Declaration only; the implementation lives in TargetLoweringBase.cpp.
1825 virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1826
1827 /// Return the type of registers that this ValueType will eventually require.
1829 assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1830 return RegisterTypeForVT[VT.SimpleTy];
1831 }
1832
1833 /// Return the type of registers that this ValueType will eventually require.
1834 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1835 if (VT.isSimple())
1836 return getRegisterType(VT.getSimpleVT());
1837 if (VT.isVector()) {
1838 EVT VT1;
1839 MVT RegisterVT;
1840 unsigned NumIntermediates;
1841 (void)getVectorTypeBreakdown(Context, VT, VT1,
1842 NumIntermediates, RegisterVT);
1843 return RegisterVT;
1844 }
1845 if (VT.isInteger()) {
1846 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1847 }
1848 llvm_unreachable("Unsupported extended type!");
1849 }
1850
1851 /// Return the number of registers that this ValueType will eventually
1852 /// require.
1853 ///
1854 /// This is one for any types promoted to live in larger registers, but may be
1855 /// more than one for types (like i64) that are split into pieces. For types
1856 /// like i140, which are first promoted then expanded, it is the number of
1857 /// registers needed to hold all the bits of the original type. For an i140
1858 /// on a 32 bit machine this means 5 registers.
1859 ///
1860 /// RegisterVT may be passed as a way to override the default settings, for
1861 /// instance with i128 inline assembly operands on SystemZ.
1862 virtual unsigned
1864 std::optional<MVT> RegisterVT = std::nullopt) const {
1865 if (VT.isSimple()) {
1866 assert((unsigned)VT.getSimpleVT().SimpleTy <
1867 std::size(NumRegistersForVT));
1868 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1869 }
1870 if (VT.isVector()) {
1871 EVT VT1;
1872 MVT VT2;
1873 unsigned NumIntermediates;
1874 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1875 }
1876 if (VT.isInteger()) {
1877 unsigned BitWidth = VT.getSizeInBits();
1878 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1879 return (BitWidth + RegWidth - 1) / RegWidth;
1880 }
1881 llvm_unreachable("Unsupported extended type!");
1882 }
1883
1884 /// Certain combinations of ABIs, Targets and features require that types
1885 /// are legal for some operations and not for other operations.
1886 /// For MIPS all vector types must be passed through the integer register set.
1888 CallingConv::ID CC, EVT VT) const {
1889 return getRegisterType(Context, VT);
1890 }
1891
1892 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1893 /// this occurs when a vector type is used, as vector are passed through the
1894 /// integer register set.
1896 CallingConv::ID CC,
1897 EVT VT) const {
1898 return getNumRegisters(Context, VT);
1899 }
1900
1901 /// Certain targets have context sensitive alignment requirements, where one
1902 /// type has the alignment requirement of another type.
1904 const DataLayout &DL) const {
1905 return DL.getABITypeAlign(ArgTy);
1906 }
1907
1908 /// If true, then instruction selection should seek to shrink the FP constant
1909 /// of the specified type to a smaller type in order to save space and / or
1910 /// reduce runtime.
1911 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1912
1913 /// Return true if it is profitable to reduce a load to a smaller type.
1914 /// \p ByteOffset is only set if we know the pointer offset at compile time
1915 /// otherwise we should assume that additional pointer math is required.
1916 /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
1917 /// Example: (i16 (trunc (srl (i32 (load x)), 16)) -> i16 load x+2
1919 SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
1920 std::optional<unsigned> ByteOffset = std::nullopt) const {
1921 // By default, assume that it is cheaper to extract a subvector from a wide
1922 // vector load rather than creating multiple narrow vector loads.
1923 if (NewVT.isVector() && !SDValue(Load, 0).hasOneUse())
1924 return false;
1925
1926 return true;
1927 }
1928
1929 /// Return true (the default) if it is profitable to remove a sext_inreg(x)
1930 /// where the sext is redundant, and use x directly.
1931 virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }
1932
1933 /// Indicates if any padding is guaranteed to go at the most significant bits
1934 /// when storing the type to memory and the type size isn't equal to the store
1935 /// size.
1937 return VT.isScalarInteger() && !VT.isByteSized();
1938 }
1939
1940 /// When splitting a value of the specified type into parts, does the Lo
1941 /// or Hi part come first? This usually follows the endianness, except
1942 /// for ppcf128, where the Hi part always comes first.
1944 return DL.isBigEndian() || VT == MVT::ppcf128;
1945 }
1946
1947 /// If true, the target has custom DAG combine transformations that it can
1948 /// perform for the specified node.
1950 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1951 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1952 }
1953
// NOTE(review): the definition whose closing brace follows was truncated by
// extraction (its body and signature are missing); recover it from the
// repository copy of this header.
1956 }
1957
1958 /// Returns the size of the platform's va_list object.
1959 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1960 return getPointerTy(DL).getSizeInBits();
1961 }
1962
1963 /// Get maximum # of store operations permitted for llvm.memset
1964 ///
1965 /// This function returns the maximum number of store operations permitted
1966 /// to replace a call to llvm.memset. The value is set by the target at the
1967 /// performance threshold for such a replacement. If OptSize is true,
1968 /// return the limit for functions that have OptSize attribute.
1969 unsigned getMaxStoresPerMemset(bool OptSize) const;
1970
1971 /// Get maximum # of store operations permitted for llvm.memcpy
1972 ///
1973 /// This function returns the maximum number of store operations permitted
1974 /// to replace a call to llvm.memcpy. The value is set by the target at the
1975 /// performance threshold for such a replacement. If OptSize is true,
1976 /// return the limit for functions that have OptSize attribute.
1977 unsigned getMaxStoresPerMemcpy(bool OptSize) const;
1978
1979 /// \brief Get maximum # of store operations to be glued together
1980 ///
1981 /// This function returns the maximum number of store operations permitted
1982 /// to glue together during lowering of llvm.memcpy. The value is set by
1983 // the target at the performance threshold for such a replacement.
1984 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1986 }
1987
1988 /// Get maximum # of load operations permitted for memcmp
1989 ///
1990 /// This function returns the maximum number of load operations permitted
1991 /// to replace a call to memcmp. The value is set by the target at the
1992 /// performance threshold for such a replacement. If OptSize is true,
1993 /// return the limit for functions that have OptSize attribute.
1994 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1996 }
1997
1998 /// Get maximum # of store operations permitted for llvm.memmove
1999 ///
2000 /// This function returns the maximum number of store operations permitted
2001 /// to replace a call to llvm.memmove. The value is set by the target at the
2002 /// performance threshold for such a replacement. If OptSize is true,
2003 /// return the limit for functions that have OptSize attribute.
// Declaration only; defined out of line.
2004 unsigned getMaxStoresPerMemmove(bool OptSize) const;
2005
2006 /// Determine if the target supports unaligned memory accesses.
2007 ///
2008 /// This function returns true if the target allows unaligned memory accesses
2009 /// of the specified type in the given address space. If true, it also returns
2010 /// a relative speed of the unaligned memory access in the last argument by
2011 /// reference. The higher the speed number the faster the operation comparing
2012 /// to a number returned by another such call. This is used, for example, in
2013 /// situations where an array copy/move/set is converted to a sequence of
2014 /// store operations. Its use helps to ensure that such replacements don't
2015 /// generate code that causes an alignment error (trap) on the target machine.
2017 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
2019 unsigned * /*Fast*/ = nullptr) const {
2020 return false;
2021 }
2022
2023 /// LLT handling variant.
2025 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
2027 unsigned * /*Fast*/ = nullptr) const {
2028 return false;
2029 }
2030
2031 /// This function returns true if the memory access is aligned or if the
2032 /// target allows this specific unaligned memory access. If the access is
2033 /// allowed, the optional final parameter returns a relative speed of the
2034 /// access (as defined by the target).
2035 bool allowsMemoryAccessForAlignment(
2036 LLVMContext &Context, const DataLayout &DL, EVT VT,
2037 unsigned AddrSpace = 0, Align Alignment = Align(1),
2039 unsigned *Fast = nullptr) const;
2040
2041 /// Return true if the memory access of this type is aligned or if the target
2042 /// allows this specific unaligned access for the given MachineMemOperand.
2043 /// If the access is allowed, the optional final parameter returns a relative
2044 /// speed of the access (as defined by the target).
// Convenience overload: alignment/flags are taken from the MMO.
2045 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
2046 const DataLayout &DL, EVT VT,
2047 const MachineMemOperand &MMO,
2048 unsigned *Fast = nullptr) const;
2049
2050 /// Return true if the target supports a memory access of this type for the
2051 /// given address space and alignment. If the access is allowed, the optional
2052 /// final parameter returns the relative speed of the access (as defined by
2053 /// the target).
2054 virtual bool
2055 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
2056 unsigned AddrSpace = 0, Align Alignment = Align(1),
2058 unsigned *Fast = nullptr) const;
2059
2060 /// Return true if the target supports a memory access of this type for the
2061 /// given MachineMemOperand. If the access is allowed, the optional
2062 /// final parameter returns the relative access speed (as defined by the
2063 /// target).
2064 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
2065 const MachineMemOperand &MMO,
2066 unsigned *Fast = nullptr) const;
2067
// GlobalISel counterpart of the EVT-based overload above.
2068 /// LLT handling variant.
2069 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
2070 const MachineMemOperand &MMO,
2071 unsigned *Fast = nullptr) const;
2072
2073 /// Returns the target specific optimal type for load and store operations as
2074 /// a result of memset, memcpy, and memmove lowering.
2075 /// It returns EVT::Other if the type should be determined using generic
2076 /// target-independent logic.
2077 virtual EVT
2079 const AttributeList & /*FuncAttributes*/) const {
2080 return MVT::Other;
2081 }
2082
2083 /// LLT returning variant.
2084 virtual LLT
2086 const AttributeList & /*FuncAttributes*/) const {
2087 return LLT();
2088 }
2089
2090 /// Returns true if it's safe to use load / store of the specified type to
2091 /// expand memcpy / memset inline.
2092 ///
2093 /// This is mostly true for all types except for some special cases. For
2094 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
2095 /// fstpl which also does type conversion. Note the specified type doesn't
2096 /// have to be legal as the hook is used before type legalization.
2097 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
2098
2099 /// Return lower limit for number of blocks in a jump table.
2100 virtual unsigned getMinimumJumpTableEntries() const;
2101
2102 /// Return lower limit of the density in a jump table.
2103 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
2104
2105 /// Return upper limit for number of entries in a jump table.
2106 /// Zero if no limit.
2107 unsigned getMaximumJumpTableSize() const;
2108
2109 virtual bool isJumpTableRelative() const;
2110
2111 /// Return the minimum of largest number of comparisons in BitTest.
2112 unsigned getMinimumBitTestCmps() const;
2113
2114 /// Return maximum known-legal store size, which can be guaranteed for
2115 /// scalable vectors.
2117 return MaximumLegalStoreInBits;
2118 }
2119
2120 /// If a physical register, this specifies the register that
2121 /// llvm.savestack/llvm.restorestack should save and restore.
2123 return StackPointerRegisterToSaveRestore;
2124 }
2125
2126 /// If a physical register, this returns the register that receives the
2127 /// exception address on entry to an EH pad.
2128 virtual Register
2129 getExceptionPointerRegister(const Constant *PersonalityFn) const {
2130 return Register();
2131 }
2132
2133 /// If a physical register, this returns the register that receives the
2134 /// exception typeid on entry to a landing pad.
2135 virtual Register
2136 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
2137 return Register();
2138 }
2139
2140 virtual bool needsFixedCatchObjects() const {
2141 report_fatal_error("Funclet EH is not implemented for this target");
2142 }
2143
2144 /// Return the minimum stack alignment of an argument.
2146 return MinStackArgumentAlignment;
2147 }
2148
2149 /// Return the minimum function alignment.
2150 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
2151
2152 /// Return the preferred function alignment.
2153 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
2154
2155 /// Return the preferred loop alignment.
2156 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
2157
2158 /// Return the maximum amount of bytes allowed to be emitted when padding for
2159 /// alignment.
2160 virtual unsigned
2161 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
2162
2163 /// Should loops be aligned even when the function is marked OptSize (but not
2164 /// MinSize).
2165 virtual bool alignLoopsWithOptSize() const { return false; }
2166
2167 /// If the target has a standard location for the stack protector guard,
2168 /// returns the address of that location. Otherwise, returns nullptr.
2169 /// DEPRECATED: please override useLoadStackGuardNode and customize
2170 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
2171 virtual Value *getIRStackGuard(IRBuilderBase &IRB,
2172 const LibcallLoweringInfo &Libcalls) const;
2173
2174 /// Inserts necessary declarations for SSP (stack protection) purpose.
2175 /// Should be used only when getIRStackGuard returns nullptr.
2176 virtual void insertSSPDeclarations(Module &M,
2177 const LibcallLoweringInfo &Libcalls) const;
2178
2179 /// Return the variable that's previously inserted by insertSSPDeclarations,
2180 /// if any, otherwise return nullptr. Should be used only when
2181 /// getIRStackGuard returns nullptr.
2182 virtual Value *getSDagStackGuard(const Module &M,
2183 const LibcallLoweringInfo &Libcalls) const;
2184
2185 /// If this function returns true, stack protection checks should XOR the
2186 /// frame pointer (or whichever pointer is used to address locals) into the
2187 /// stack guard value before checking it. getIRStackGuard must return nullptr
2188 /// if this returns true.
2189 virtual bool useStackGuardXorFP() const { return false; }
2190
2191 /// If the target has a standard stack protection check function that
2192 /// performs validation and error handling, returns the function. Otherwise,
2193 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
2194 /// Should be used only when getIRStackGuard returns nullptr.
2195 Function *getSSPStackGuardCheck(const Module &M,
2196 const LibcallLoweringInfo &Libcalls) const;
2197
2198protected:
2199 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
2200 bool UseTLS) const;
2201
2202public:
2203 /// Returns the target-specific address of the unsafe stack pointer.
2204 virtual Value *
2205 getSafeStackPointerLocation(IRBuilderBase &IRB,
2206 const LibcallLoweringInfo &Libcalls) const;
2207
// NOTE(review): the doxygen comment below describes getStackProbeSymbolName
// (declared further down) rather than the two boolean hooks it precedes —
// this mismatch exists upstream as well.
2208 /// Returns the name of the symbol used to emit stack probes or the empty
2209 /// string if not applicable.
2210 virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }
2211
2212 virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }
2213
2215 return "";
2216 }
2217
2218 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
2219 /// are happy to sink it into basic blocks. A cast may be free, but not
2220 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
// Declaration only; defined out of line.
2221 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
2222
2223 /// Return true if the pointer arguments to CI should be aligned by aligning
2224 /// the object whose address is being passed. If so then MinSize is set to the
2225 /// minimum size the object must be to be aligned and PrefAlign is set to the
2226 /// preferred alignment.
2227 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
2228 Align & /*PrefAlign*/) const {
2229 return false;
2230 }
2231
2232 //===--------------------------------------------------------------------===//
2233 /// \name Helpers for TargetTransformInfo implementations
2234 /// @{
2235
2236 /// Get the ISD node that corresponds to the Instruction class opcode.
2237 int InstructionOpcodeToISD(unsigned Opcode) const;
2238
2239 /// Get the ISD node that corresponds to the Intrinsic ID. Returns
2240 /// ISD::DELETED_NODE by default for an unsupported Intrinsic ID.
2241 int IntrinsicIDToISD(Intrinsic::ID ID) const;
2242
2243 /// @}
2244
2245 //===--------------------------------------------------------------------===//
2246 /// \name Helpers for atomic expansion.
2247 /// @{
2248
2249 /// Returns the maximum atomic operation size (in bits) supported by
2250 /// the backend. Atomic operations greater than this size (as well
2251 /// as ones that are not naturally aligned), will be expanded by
2252 /// AtomicExpandPass into an __atomic_* library call.
2254 return MaxAtomicSizeInBitsSupported;
2255 }
2256
2257 /// Returns the size in bits of the maximum div/rem the backend supports.
2258 /// Larger operations will be expanded by ExpandIRInsts.
2260 return MaxDivRemBitWidthSupported;
2261 }
2262
2263 /// Returns the size in bits of the maximum fp to/from int conversion the
2264 /// backend supports. Larger operations will be expanded by ExpandIRInsts.
2266 return MaxLargeFPConvertBitWidthSupported;
2267 }
2268
2269 /// Returns the size of the smallest cmpxchg or ll/sc instruction
2270 /// the backend supports. Any smaller operations are widened in
2271 /// AtomicExpandPass.
2272 ///
2273 /// Note that *unlike* operations above the maximum size, atomic ops
2274 /// are still natively supported below the minimum; they just
2275 /// require a more complex expansion.
2276 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
2277
2278 /// Whether the target supports unaligned atomic operations.
2279 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
2280
2281 /// Whether AtomicExpandPass should automatically insert fences and reduce
2282 /// ordering for this atomic. This should be true for most architectures with
2283 /// weak memory ordering. Defaults to false.
2284 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
2285 return false;
2286 }
2287
2288 /// Whether AtomicExpandPass should automatically insert a seq_cst trailing
2289 /// fence without reducing the ordering for this atomic store. Defaults to
2290 /// false.
2291 virtual bool
2293 return false;
2294 }
2295
2296 // The memory ordering that AtomicExpandPass should assign to a atomic
2297 // instruction that it has lowered by adding fences. This can be used
2298 // to "fold" one of the fences into the atomic instruction.
// NOTE(review): the body of this hook (its name and return statement) was
// lost in extraction; recover it from the repository copy of this header.
2299 virtual AtomicOrdering
2303
2304 // Whether to issue an atomic load for the initial word value before the
2305 // atomicrmw/cmpxchg emulation loop.
2306 // TODO: For correctness, an atomic load should be issued for all targets.
2307 // Remove this API once this is achieved
// NOTE(review): the signature of this hook was lost in extraction; only its
// `return true;` body remains. Recover it from the repository copy.
2309 return true;
2310 }
2311
2312 /// Perform a load-linked operation on Addr, returning a "Value *" with the
2313 /// corresponding pointee type. This may entail some non-trivial operations to
2314 /// truncate or reconstruct types that will be illegal in the backend. See
2315 /// ARMISelLowering for an example implementation.
2316 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
2317 Value *Addr, AtomicOrdering Ord) const {
2318 llvm_unreachable("Load linked unimplemented on this target");
2319 }
2320
2321 /// Perform a store-conditional operation to Addr. Return the status of the
2322 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2324 Value *Addr, AtomicOrdering Ord) const {
2325 llvm_unreachable("Store conditional unimplemented on this target");
2326 }
2327
2328 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2329 /// represents the core LL/SC loop which will be lowered at a late stage by
2330 /// the backend. The target-specific intrinsic returns the loaded value and
2331 /// is not responsible for masking and shifting the result.
2333 AtomicRMWInst *AI,
2334 Value *AlignedAddr, Value *Incr,
2335 Value *Mask, Value *ShiftAmt,
2336 AtomicOrdering Ord) const {
2337 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2338 }
2339
2340 /// Perform a atomicrmw expansion using a target-specific way. This is
2341 /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2342 /// work, and the target supports another way to lower atomicrmw.
2343 virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2345 "Generic atomicrmw expansion unimplemented on this target");
2346 }
2347
2348 /// Perform a atomic store using a target-specific way.
2349 virtual void emitExpandAtomicStore(StoreInst *SI) const {
2351 "Generic atomic store expansion unimplemented on this target");
2352 }
2353
2354 /// Perform a atomic load using a target-specific way.
2355 virtual void emitExpandAtomicLoad(LoadInst *LI) const {
2357 "Generic atomic load expansion unimplemented on this target");
2358 }
2359
2360 /// Perform a cmpxchg expansion using a target-specific method.
2362 llvm_unreachable("Generic cmpxchg expansion unimplemented on this target");
2363 }
2364
2365 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2366 /// represents the combined bit test intrinsic which will be lowered at a late
2367 /// stage by the backend.
2370 "Bit test atomicrmw expansion unimplemented on this target");
2371 }
2372
2373 /// Perform a atomicrmw which the result is only used by comparison, using a
2374 /// target-specific intrinsic. This represents the combined atomic and compare
2375 /// intrinsic which will be lowered at a late stage by the backend.
2378 "Compare arith atomicrmw expansion unimplemented on this target");
2379 }
2380
2381 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2382 /// represents the core LL/SC loop which will be lowered at a late stage by
2383 /// the backend. The target-specific intrinsic returns the loaded value and
2384 /// is not responsible for masking and shifting the result.
2386 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2387 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2388 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2389 }
2390
2391 //===--------------------------------------------------------------------===//
2392 /// \name KCFI check lowering.
2393 /// @{
2394
2397 const TargetInstrInfo *TII) const {
2398 llvm_unreachable("KCFI is not supported on this target");
2399 }
2400
2401 /// @}
2402
2403 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2404 /// It is called by AtomicExpandPass before expanding an
2405 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2406 /// if shouldInsertFencesForAtomic returns true.
2407 ///
2408 /// Inst is the original atomic instruction, prior to other expansions that
2409 /// may be performed.
2410 ///
2411 /// This function should either return a nullptr, or a pointer to an IR-level
2412 /// Instruction*. Even complex fence sequences can be represented by a
2413 /// single Instruction* through an intrinsic to be lowered later.
2414 ///
2415 /// The default implementation emits an IR fence before any release (or
2416 /// stronger) operation that stores, and after any acquire (or stronger)
2417 /// operation. This is generally a correct implementation, but backends may
2418 /// override if they wish to use alternative schemes (e.g. the PowerPC
2419 /// standard ABI uses a fence before a seq_cst load instead of after a
2420 /// seq_cst store).
2421 /// @{
// Declarations only; default definitions live in the .cpp file.
2422 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2423 Instruction *Inst,
2424 AtomicOrdering Ord) const;
2425
2426 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2427 Instruction *Inst,
2428 AtomicOrdering Ord) const;
2429 /// @}
2430
2431 // Emits code that executes when the comparison result in the ll/sc
2432 // expansion of a cmpxchg instruction is such that the store-conditional will
2433 // not execute. This makes it possible to balance out the load-linked with
2434 // a dedicated instruction, if desired.
2435 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2436 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2437 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2438
2439 /// Returns true if arguments should be sign-extended in lib calls.
2440 virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const {
2441 return IsSigned;
2442 }
2443
2444 /// Returns true if arguments should be extended in lib calls.
2445 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2446 return true;
2447 }
2448
// NOTE(review): the three hook definitions documented below
// (shouldExpandAtomicLoadInIR, shouldCastAtomicLoadInIR,
// shouldExpandAtomicStoreInIR) were entirely lost in extraction — only their
// doxygen comments survive. Recover the bodies from the repository copy.
2449 /// Returns how the given (atomic) load should be expanded by the
2450 /// IR-level AtomicExpand pass.
2454
2455 /// Returns how the given (atomic) load should be cast by the IR-level
2456 /// AtomicExpand pass.
2462
2463 /// Returns how the given (atomic) store should be expanded by the IR-level
2464 /// AtomicExpand pass into. For instance AtomicExpansionKind::CustomExpand
2465 /// will try to use an atomicrmw xchg.
2469
2470 /// Returns how the given (atomic) store should be cast by the IR-level
2471 /// AtomicExpand pass into. For instance AtomicExpansionKind::CastToInteger
2472 /// will try to cast the operands to integer values.
2474 if (SI->getValueOperand()->getType()->isFloatingPointTy())
2477 }
2478
// NOTE(review): the bodies of the three hooks documented below
// (shouldExpandAtomicCmpXchgInIR, shouldExpandAtomicRMWInIR,
// shouldCastAtomicRMWIInIR) were lost in extraction — only their comments and
// `virtual AtomicExpansionKind` return-type lines survive. Recover them from
// the repository copy.
2479 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2480 /// AtomicExpand pass.
2481 virtual AtomicExpansionKind
2485
2486 /// Returns how the IR-level AtomicExpand pass should expand the given
2487 /// AtomicRMW, if at all. Default is to never expand.
2488 virtual AtomicExpansionKind
2493
2494 /// Returns how the given atomic atomicrmw should be cast by the IR-level
2495 /// AtomicExpand pass.
2496 virtual AtomicExpansionKind
2505
2506 /// On some platforms, an AtomicRMW that never actually modifies the value
2507 /// (such as fetch_add of 0) can be turned into a fence followed by an
2508 /// atomic load. This may sound useless, but it makes it possible for the
2509 /// processor to keep the cacheline shared, dramatically improving
2510 /// performance. And such idempotent RMWs are useful for implementing some
2511 /// kinds of locks, see for example (justification + benchmarks):
2512 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2513 /// This method tries doing that transformation, returning the atomic load if
2514 /// it succeeds, and nullptr otherwise.
2515 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2516 /// another round of expansion.
2517 virtual LoadInst *
2519 return nullptr;
2520 }
2521
2522 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2523 /// SIGN_EXTEND, or ANY_EXTEND).
2525 return ISD::ZERO_EXTEND;
2526 }
2527
2528 /// Returns how the platform's atomic compare and swap expects its comparison
2529 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2530 /// separate from getExtendForAtomicOps, which is concerned with the
2531 /// sign-extension of the instruction's output, whereas here we are concerned
2532 /// with the sign-extension of the input. For targets with compare-and-swap
2533 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2534 /// the input can be ANY_EXTEND, but the output will still have a specific
2535 /// extension.
2537 return ISD::ANY_EXTEND;
2538 }
2539
2540 /// Returns how the platform's atomic rmw operations expect their input
2541 /// argument to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND).
2543 return ISD::ANY_EXTEND;
2544 }
2545
2546 /// @}
2547
2548 /// Returns true if we should normalize
2549 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2550 /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
2551 /// that it saves us from materializing N0 and N1 in an integer register.
2552 /// Targets that are able to perform and/or on flags should return false here.
2554 EVT VT) const {
2555 // If a target has multiple condition registers, then it likely has logical
2556 // operations on those registers.
2558 return false;
2559 // Only do the transform if the value won't be split into multiple
2560 // registers.
2561 LegalizeTypeAction Action = getTypeAction(Context, VT);
2562 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2563 Action != TypeSplitVector;
2564 }
2565
2566 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2567
2568 /// Return true if a select of constants (select Cond, C1, C2) should be
2569 /// transformed into simple math ops with the condition value. For example:
2570 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2571 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2572 return false;
2573 }
2574
2575 /// Return true if it is profitable to transform an integer
2576 /// multiplication-by-constant into simpler operations like shifts and adds.
2577 /// This may be true if the target does not directly support the
2578 /// multiplication operation for the specified type or the sequence of simpler
2579 /// ops is faster than the multiply.
2581 EVT VT, SDValue C) const {
2582 return false;
2583 }
2584
2585 /// Return true if it may be profitable to transform
2586 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2587 /// This may not be true if c1 and c2 can be represented as immediates but
2588 /// c1*c2 cannot, for example.
2589 /// The target should check if c1, c2 and c1*c2 can be represented as
2590 /// immediates, or have to be materialized into registers. If it is not sure
2591 /// about some cases, a default true can be returned to let the DAGCombiner
2592 /// decide.
2593 /// AddNode is (add x, c1), and ConstNode is c2.
2595 SDValue ConstNode) const {
2596 return true;
2597 }
2598
2599 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2600 /// conversion operations - canonicalizing the FP source value instead of
2601 /// converting all cases and then selecting based on value.
2602 /// This may be true if the target throws exceptions for out of bounds
2603 /// conversions or has fast FP CMOV.
2604 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2605 bool IsSigned) const {
2606 return false;
2607 }
2608
2609 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2610 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2611 /// considered beneficial.
2612 /// If optimizing for size, expansion is only considered beneficial for upto
2613 /// 5 multiplies and a divide (if the exponent is negative).
2614 bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2615 if (Exponent < 0)
2616 Exponent = -Exponent;
2617 uint64_t E = static_cast<uint64_t>(Exponent);
2618 return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2619 }
2620
2621 //===--------------------------------------------------------------------===//
2622 // TargetLowering Configuration Methods - These methods should be invoked by
2623 // the derived class constructor to configure this object for the target.
2624 //
2625protected:
2626 /// Specify how the target extends the result of integer and floating point
2627 /// boolean values from i1 to a wider type. See getBooleanContents.
2629 BooleanContents = Ty;
2630 BooleanFloatContents = Ty;
2631 }
2632
2633 /// Specify how the target extends the result of integer and floating point
2634 /// boolean values from i1 to a wider type. See getBooleanContents.
2636 BooleanContents = IntTy;
2637 BooleanFloatContents = FloatTy;
2638 }
2639
2640 /// Specify how the target extends the result of a vector boolean value from a
2641 /// vector of i1 to a wider type. See getBooleanContents.
2643 BooleanVectorContents = Ty;
2644 }
2645
2646 /// Specify the target scheduling preference.
2648 SchedPreferenceInfo = Pref;
2649 }
2650
2651 /// Indicate the minimum number of blocks to generate jump tables.
2652 void setMinimumJumpTableEntries(unsigned Val);
2653
2654 /// Indicate the maximum number of entries in jump tables.
2655 /// Set to zero to generate unlimited jump tables.
2656 void setMaximumJumpTableSize(unsigned);
2657
2658 /// Set the minimum of largest of number of comparisons to generate BitTest.
2659 void setMinimumBitTestCmps(unsigned Val);
2660
2661 /// If set to a physical register, this specifies the register that
2662 /// llvm.savestack/llvm.restorestack should save and restore.
2664 StackPointerRegisterToSaveRestore = R;
2665 }
2666
2667 /// Tells the code generator that the target has BitExtract instructions.
2668 /// The code generator will aggressively sink "shift"s into the blocks of
2669 /// their users if the users will generate "and" instructions which can be
2670 /// combined with "shift" to BitExtract instructions.
2671 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2672 HasExtractBitsInsn = hasExtractInsn;
2673 }
2674
2675 /// Tells the code generator not to expand logic operations on comparison
2676 /// predicates into separate sequences that increase the amount of flow
2677 /// control.
2678 void setJumpIsExpensive(bool isExpensive = true);
2679
2680 /// Tells the code generator which bitwidths to bypass.
2681 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2682 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2683 }
2684
2685 /// Add the specified register class as an available regclass for the
2686 /// specified value type. This indicates the selector can handle values of
2687 /// that class natively.
2689 assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2690 RegClassForVT[VT.SimpleTy] = RC;
2691 }
2692
2693 /// Return the largest legal super-reg register class of the register class
2694 /// for the specified type and its associated "cost".
2695 virtual std::pair<const TargetRegisterClass *, uint8_t>
2696 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2697
2698 /// Once all of the register classes are added, this allows us to compute
2699 /// derived properties we expose.
2700 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2701
  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it. Note that VT may refer to either
  /// the type of a result or that of an operand of Op.
  void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
    assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
    // One action recorded per (value type, opcode) pair.
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }
2710 LegalizeAction Action) {
2711 for (auto Op : Ops)
2712 setOperationAction(Op, VT, Action);
2713 }
2715 LegalizeAction Action) {
2716 for (auto VT : VTs)
2717 setOperationAction(Ops, VT, Action);
2718 }
2719
2720 /// Indicate that the specified load with extension does not work with the
2721 /// specified type and indicate what to do about it.
2722 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2723 LegalizeAction Action) {
2724 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2725 MemVT.isValid() && "Table isn't big enough!");
2726 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2727 unsigned Shift = 4 * ExtType;
2728 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2729 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2730 }
2731 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2732 LegalizeAction Action) {
2733 for (auto ExtType : ExtTypes)
2734 setLoadExtAction(ExtType, ValVT, MemVT, Action);
2735 }
2737 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2738 for (auto MemVT : MemVTs)
2739 setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2740 }
2741
2742 /// Let target indicate that an extending atomic load of the specified type
2743 /// is legal.
2744 void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2745 LegalizeAction Action) {
2746 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2747 MemVT.isValid() && "Table isn't big enough!");
2748 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2749 unsigned Shift = 4 * ExtType;
2750 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &=
2751 ~((uint16_t)0xF << Shift);
2752 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |=
2753 ((uint16_t)Action << Shift);
2754 }
2756 LegalizeAction Action) {
2757 for (auto ExtType : ExtTypes)
2758 setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
2759 }
2761 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2762 for (auto MemVT : MemVTs)
2763 setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2764 }
2765
  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    // Indexed by [in-register value type][narrower memory type].
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }
2772
2773 /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
2775 ///
2776 /// NOTE: All indexed mode loads are initialized to Expand in
2777 /// TargetLowering.cpp
2779 LegalizeAction Action) {
2780 for (auto IdxMode : IdxModes)
2781 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2782 }
2783
2785 LegalizeAction Action) {
2786 for (auto VT : VTs)
2787 setIndexedLoadAction(IdxModes, VT, Action);
2788 }
2789
2790 /// Indicate that the specified indexed store does or does not work with the
2791 /// specified type and indicate what to do about it.
2792 ///
2793 /// NOTE: All indexed mode stores are initialized to Expand in
2794 /// TargetLowering.cpp
2796 LegalizeAction Action) {
2797 for (auto IdxMode : IdxModes)
2798 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2799 }
2800
2802 LegalizeAction Action) {
2803 for (auto VT : VTs)
2804 setIndexedStoreAction(IdxModes, VT, Action);
2805 }
2806
  /// Indicate that the specified indexed masked load does or does not work with
  /// the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
                                  LegalizeAction Action) {
    // Recorded in the shared indexed-mode table under the masked-load slot.
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
  }
2816
  /// Indicate that the specified indexed masked store does or does not work
  /// with the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
                                   LegalizeAction Action) {
    // Recorded in the shared indexed-mode table under the masked-store slot.
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
  }
2826
2827 /// Indicate that the specified condition code is or isn't supported on the
2828 /// target and indicate what to do about it.
2830 LegalizeAction Action) {
2831 for (auto CC : CCs) {
2832 assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2833 "Table isn't big enough!");
2834 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2835 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the
2836 /// 32-bit value and the upper 29 bits index into the second dimension of
2837 /// the array to select what 32-bit value to use.
2838 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2839 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2840 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2841 }
2842 }
2844 LegalizeAction Action) {
2845 for (auto VT : VTs)
2846 setCondCodeAction(CCs, VT, Action);
2847 }
2848
2849 /// Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input
2850 /// type InputVT should be treated by the target. Either it's legal, needs to
2851 /// be promoted to a larger size, needs to be expanded to some other code
2852 /// sequence, or the target has a custom expander for it.
2853 void setPartialReduceMLAAction(unsigned Opc, MVT AccVT, MVT InputVT,
2854 LegalizeAction Action) {
2857 assert(AccVT.isValid() && InputVT.isValid() &&
2858 "setPartialReduceMLAAction types aren't valid");
2859 PartialReduceActionTypes Key = {Opc, AccVT.SimpleTy, InputVT.SimpleTy};
2860 PartialReduceMLAActions[Key] = Action;
2861 }
2863 MVT InputVT, LegalizeAction Action) {
2864 for (unsigned Opc : Opcodes)
2865 setPartialReduceMLAAction(Opc, AccVT, InputVT, Action);
2866 }
2867
2868 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2869 /// to trying a larger integer/fp until it can find one that works. If that
2870 /// default is insufficient, this method can be used by the target to override
2871 /// the default.
2872 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2873 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2874 }
2875
  /// Convenience method to set an operation to Promote and specify the type
  /// in a single call.
  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    // Mark the operation as promoted, then record the concrete target type.
    setOperationAction(Opc, OrigVT, Promote);
    AddPromotedToType(Opc, OrigVT, DestVT);
  }
2883 MVT DestVT) {
2884 for (auto Op : Ops) {
2885 setOperationAction(Op, OrigVT, Promote);
2886 AddPromotedToType(Op, OrigVT, DestVT);
2887 }
2888 }
2889
2890 /// Targets should invoke this method for each target independent node that
2891 /// they want to provide a custom DAG combiner for by implementing the
2892 /// PerformDAGCombine virtual method.
2894 for (auto NT : NTs) {
2895 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2896 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2897 }
2898 }
2899
2900 /// Set the target's minimum function alignment.
2902 MinFunctionAlignment = Alignment;
2903 }
2904
2905 /// Set the target's preferred function alignment. This should be set if
2906 /// there is a performance benefit to higher-than-minimum alignment
2908 PrefFunctionAlignment = Alignment;
2909 }
2910
  /// Set the target's preferred loop alignment. Default alignment is one, it
  /// means the target does not care about loop alignment. The target may also
  /// override getPrefLoopAlignment to provide per-loop values.
  void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }

  /// Cap on the number of bytes spent reaching an alignment target.
  /// NOTE(review): the exact consumer is not visible in this header —
  /// presumably bounds alignment padding; confirm against uses of
  /// MaxBytesForAlignment.
  void setMaxBytesForAlignment(unsigned MaxBytes) {
    MaxBytesForAlignment = MaxBytes;
  }
2918
2919 /// Set the minimum stack alignment of an argument.
2921 MinStackArgumentAlignment = Alignment;
2922 }
2923
2924 /// Set the maximum atomic operation size supported by the
2925 /// backend. Atomic operations greater than this size (as well as
2926 /// ones that are not naturally aligned), will be expanded by
2927 /// AtomicExpandPass into an __atomic_* library call.
2928 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2929 MaxAtomicSizeInBitsSupported = SizeInBits;
2930 }
2931
2932 /// Set the size in bits of the maximum div/rem the backend supports.
2933 /// Larger operations will be expanded by ExpandIRInsts.
2934 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2935 MaxDivRemBitWidthSupported = SizeInBits;
2936 }
2937
2938 /// Set the size in bits of the maximum fp to/from int conversion the backend
2939 /// supports. Larger operations will be expanded by ExpandIRInsts.
2940 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2941 MaxLargeFPConvertBitWidthSupported = SizeInBits;
2942 }
2943
2944 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2945 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2946 MinCmpXchgSizeInBits = SizeInBits;
2947 }
2948
2949 /// Sets whether unaligned atomic operations are supported.
2950 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2951 SupportsUnalignedAtomics = UnalignedSupported;
2952 }
2953
2954public:
2955 //===--------------------------------------------------------------------===//
2956 // Addressing mode description hooks (used by LSR etc).
2957 //
2958
2959 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2960 /// instructions reading the address. This allows as much computation as
2961 /// possible to be done in the address mode for that operand. This hook lets
2962 /// targets also pass back when this should be done on intrinsics which
2963 /// load/store.
2964 virtual bool getAddrModeArguments(const IntrinsicInst * /*I*/,
2965 SmallVectorImpl<Value *> & /*Ops*/,
2966 Type *& /*AccessTy*/) const {
2967 return false;
2968 }
2969
2970 /// This represents an addressing mode of:
2971 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale
2972 /// If BaseGV is null, there is no BaseGV.
2973 /// If BaseOffs is zero, there is no base offset.
2974 /// If HasBaseReg is false, there is no base register.
2975 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2976 /// no scale.
2977 /// If ScalableOffset is zero, there is no scalable offset.
2978 struct AddrMode {
2980 int64_t BaseOffs = 0;
2981 bool HasBaseReg = false;
2982 int64_t Scale = 0;
2983 int64_t ScalableOffset = 0;
2984 AddrMode() = default;
2985 };
2986
2987 /// Return true if the addressing mode represented by AM is legal for this
2988 /// target, for a load/store of the specified type.
2989 ///
2990 /// The type may be VoidTy, in which case only return true if the addressing
2991 /// mode is legal for a load/store of any legal type. TODO: Handle
2992 /// pre/postinc as well.
2993 ///
2994 /// If the address space cannot be determined, it will be -1.
2995 ///
2996 /// TODO: Remove default argument
2997 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2998 Type *Ty, unsigned AddrSpace,
2999 Instruction *I = nullptr) const;
3000
3001 /// Returns true if the targets addressing mode can target thread local
3002 /// storage (TLS).
3003 virtual bool addressingModeSupportsTLS(const GlobalValue &) const {
3004 return false;
3005 }
3006
3007 /// Return the prefered common base offset.
3008 virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
3009 int64_t MaxOffset) const {
3010 return 0;
3011 }
3012
3013 /// Return true if the specified immediate is legal icmp immediate, that is
3014 /// the target has icmp instructions which can compare a register against the
3015 /// immediate without having to materialize the immediate into a register.
3016 virtual bool isLegalICmpImmediate(int64_t) const {
3017 return true;
3018 }
3019
3020 /// Return true if the specified immediate is legal add immediate, that is the
3021 /// target has add instructions which can add a register with the immediate
3022 /// without having to materialize the immediate into a register.
3023 virtual bool isLegalAddImmediate(int64_t) const {
3024 return true;
3025 }
3026
3027 /// Return true if adding the specified scalable immediate is legal, that is
3028 /// the target has add instructions which can add a register with the
3029 /// immediate (multiplied by vscale) without having to materialize the
3030 /// immediate into a register.
3031 virtual bool isLegalAddScalableImmediate(int64_t) const { return false; }
3032
3033 /// Return true if the specified immediate is legal for the value input of a
3034 /// store instruction.
3035 virtual bool isLegalStoreImmediate(int64_t Value) const {
3036 // Default implementation assumes that at least 0 works since it is likely
3037 // that a zero register exists or a zero immediate is allowed.
3038 return Value == 0;
3039 }
3040
3041 /// Given a shuffle vector SVI representing a vector splat, return a new
3042 /// scalar type of size equal to SVI's scalar type if the new type is more
3043 /// profitable. Returns nullptr otherwise. For example under MVE float splats
3044 /// are converted to integer to prevent the need to move from SPR to GPR
3045 /// registers.
3047 return nullptr;
3048 }
3049
3050 /// Given a set in interconnected phis of type 'From' that are loaded/stored
3051 /// or bitcast to type 'To', return true if the set should be converted to
3052 /// 'To'.
3053 virtual bool shouldConvertPhiType(Type *From, Type *To) const {
3054 return (From->isIntegerTy() || From->isFloatingPointTy()) &&
3055 (To->isIntegerTy() || To->isFloatingPointTy());
3056 }
3057
  /// Returns true if the opcode is a commutative binary operation.
  virtual bool isCommutativeBinOp(unsigned Opcode) const {
    // FIXME: This should get its info from the td file.
    switch (Opcode) {
    // Integer arithmetic / min-max.
    case ISD::ADD:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::UMIN:
    case ISD::UMAX:
    // Multiplies (including carry-less and high-half forms).
    case ISD::MUL:
    case ISD::CLMUL:
    case ISD::CLMULH:
    case ISD::CLMULR:
    case ISD::MULHU:
    case ISD::MULHS:
    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI:
    // Floating-point add/mul.
    case ISD::FADD:
    case ISD::FMUL:
    // Bitwise logic.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    // Overflow-reporting / carry-propagating / saturating adds.
    case ISD::SADDO:
    case ISD::UADDO:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SADDSAT:
    case ISD::UADDSAT:
    // Floating-point min/max families.
    case ISD::FMINNUM:
    case ISD::FMAXNUM:
    case ISD::FMINNUM_IEEE:
    case ISD::FMAXNUM_IEEE:
    case ISD::FMINIMUM:
    case ISD::FMAXIMUM:
    case ISD::FMINIMUMNUM:
    case ISD::FMAXIMUMNUM:
    // Averaging and absolute-difference operations.
    case ISD::AVGFLOORS:
    case ISD::AVGFLOORU:
    case ISD::AVGCEILS:
    case ISD::AVGCEILU:
    case ISD::ABDS:
    case ISD::ABDU:
      return true;
    default: return false;
    }
  }
3104
  /// Return true if the node is a math/logic binary operator.
  virtual bool isBinOp(unsigned Opcode) const {
    // A commutative binop must be a binop.
    if (isCommutativeBinOp(Opcode))
      return true;
    // These are non-commutative binops.
    switch (Opcode) {
    // Subtraction, shifts and rotates.
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
    // Integer division/remainder.
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM:
    // Saturating subtraction.
    case ISD::SSUBSAT:
    case ISD::USUBSAT:
    // Floating-point sub/div/rem.
    case ISD::FSUB:
    case ISD::FDIV:
    case ISD::FREM:
      return true;
    default:
      return false;
    }
  }
3132
3133 /// Return true if it's free to truncate a value of type FromTy to type
3134 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
3135 /// by referencing its sub-register AX.
3136 /// Targets must return false when FromTy <= ToTy.
3137 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
3138 return false;
3139 }
3140
3141 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
3142 /// whether a call is in tail position. Typically this means that both results
3143 /// would be assigned to the same register or stack slot, but it could mean
3144 /// the target performs adequate checks of its own before proceeding with the
3145 /// tail call. Targets must return false when FromTy <= ToTy.
3146 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
3147 return false;
3148 }
3149
3150 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
3151 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
3152 return isTruncateFree(getApproximateEVTForLLT(FromTy, Ctx),
3153 getApproximateEVTForLLT(ToTy, Ctx));
3154 }
3155
3156 /// Return true if truncating the specific node Val to type VT2 is free.
3157 virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
3158 // Fallback to type matching.
3159 return isTruncateFree(Val.getValueType(), VT2);
3160 }
3161
3162 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
3163
  /// Return true if the extension represented by \p I is free.
  /// Unlike the is[Z|FP]ExtFree family which is based on types,
  /// this method can use the context provided by \p I to decide
  /// whether or not \p I is free.
  /// This method extends the behavior of the is[Z|FP]ExtFree family.
  /// In other words, if is[Z|FP]ExtFree returns true, then this method
  /// returns true as well. The converse is not true.
  /// The target can perform the adequate checks by overriding isExtFreeImpl.
  /// \pre \p I must be a sign, zero, or fp extension.
  bool isExtFree(const Instruction *I) const {
    switch (I->getOpcode()) {
    case Instruction::FPExt:
      if (isFPExtFree(EVT::getEVT(I->getType()),
                      EVT::getEVT(I->getOperand(0)->getType())))
        return true;
      break;
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
        return true;
      break;
    case Instruction::SExt:
      // No type-based sext hook; rely entirely on isExtFreeImpl below.
      break;
    default:
      llvm_unreachable("Instruction is not an extension");
    }
    // Fall through to the context-sensitive target hook.
    return isExtFreeImpl(I);
  }
3191
  /// Return true if \p Load and \p Ext can form an ExtLoad.
  /// For example, in AArch64
  ///   %L = load i8, i8* %ptr
  ///   %E = zext i8 %L to i32
  /// can be lowered into one load instruction
  ///   ldrb w0, [x0]
  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
                 const DataLayout &DL) const {
    EVT VT = getValueType(DL, Ext->getType());
    EVT LoadVT = getValueType(DL, Load->getType());

    // If the load has other users and the truncate is not free, the ext
    // probably isn't free.
    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
        !isTruncateFree(Ext->getType(), Load->getType()))
      return false;

    // Check whether the target supports casts folded into loads.
    unsigned LType;
    if (isa<ZExtInst>(Ext))
      LType = ISD::ZEXTLOAD;
    else {
      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
      LType = ISD::SEXTLOAD;
    }

    // Finally, ask whether an extending load of this kind is legal given the
    // load's alignment and address space.
    return isLoadLegal(VT, LoadVT, Load->getAlign(),
                       Load->getPointerAddressSpace(), LType, false);
  }
3221
3222 /// Return true if any actual instruction that defines a value of type FromTy
3223 /// implicitly zero-extends the value to ToTy in the result register.
3224 ///
3225 /// The function should return true when it is likely that the truncate can
3226 /// be freely folded with an instruction defining a value of FromTy. If
3227 /// the defining instruction is unknown (because you're looking at a
3228 /// function argument, PHI, etc.) then the target may require an
3229 /// explicit truncate, which is not necessarily free, but this function
3230 /// does not deal with those cases.
3231 /// Targets must return false when FromTy >= ToTy.
3232 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
3233 return false;
3234 }
3235
3236 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
3237 virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
3238 return isZExtFree(getApproximateEVTForLLT(FromTy, Ctx),
3239 getApproximateEVTForLLT(ToTy, Ctx));
3240 }
3241
3242 /// Return true if zero-extending the specific node Val to type VT2 is free
3243 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
3244 /// because it's folded such as X86 zero-extending loads).
3245 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
3246 return isZExtFree(Val.getValueType(), VT2);
3247 }
3248
3249 /// Return true if sign-extension from FromTy to ToTy is cheaper than
3250 /// zero-extension.
3251 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
3252 return false;
3253 }
3254
3255 /// Return true if this constant should be sign extended when promoting to
3256 /// a larger type.
3257 virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
3258
3259 /// Try to optimize extending or truncating conversion instructions (like
3260 /// zext, trunc, fptoui, uitofp) for the target.
3261 virtual bool
3263 const TargetTransformInfo &TTI) const {
3264 return false;
3265 }
3266
3267 /// Return true if the target supplies and combines to a paired load
3268 /// two loaded values of type LoadedType next to each other in memory.
3269 /// RequiredAlignment gives the minimal alignment constraints that must be met
3270 /// to be able to select this paired load.
3271 ///
3272 /// This information is *not* used to generate actual paired loads, but it is
3273 /// used to generate a sequence of loads that is easier to combine into a
3274 /// paired load.
3275 /// For instance, something like this:
3276 /// a = load i64* addr
3277 /// b = trunc i64 a to i32
3278 /// c = lshr i64 a, 32
3279 /// d = trunc i64 c to i32
3280 /// will be optimized into:
3281 /// b = load i32* addr1
3282 /// d = load i32* addr2
3283 /// Where addr1 = addr2 +/- sizeof(i32).
3284 ///
3285 /// In other words, unless the target performs a post-isel load combining,
3286 /// this information should not be provided because it will generate more
3287 /// loads.
3288 virtual bool hasPairedLoad(EVT /*LoadedType*/,
3289 Align & /*RequiredAlignment*/) const {
3290 return false;
3291 }
3292
3293 /// Return true if the target has a vector blend instruction.
3294 virtual bool hasVectorBlend() const { return false; }
3295
3296 /// Get the maximum supported factor for interleaved memory accesses.
3297 /// Default to be the minimum interleave factor: 2.
3298 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
3299
3300 /// Lower an interleaved load to target specific intrinsics. Return
3301 /// true on success.
3302 ///
3303 /// \p Load is the vector load instruction. Can be either a plain load
3304 /// instruction or a vp.load intrinsic.
3305 /// \p Mask is a per-segment (i.e. number of lanes equal to that of one
3306 /// component being interwoven) mask. Can be nullptr, in which case the
  /// result is unconditional.
3308 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3309 /// \p Indices is the corresponding indices for each shufflevector.
3310 /// \p Factor is the interleave factor.
3311 /// \p GapMask is a mask with zeros for components / fields that may not be
3312 /// accessed.
3313 virtual bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
3315 ArrayRef<unsigned> Indices, unsigned Factor,
3316 const APInt &GapMask) const {
3317 return false;
3318 }
3319
  /// Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p Store is the vector store instruction. Can be either a plain store
  /// or a vp.store.
  /// \p Mask is a per-segment (i.e. number of lanes equal to that of one
  /// component being interwoven) mask. Can be nullptr, in which case the
  /// result is unconditional.
  /// \p SVI is the shufflevector to RE-interleave the stored vector.
  /// \p Factor is the interleave factor.
  /// \p GapMask is a mask with zeros for components / fields that may not be
  /// accessed.
  virtual bool lowerInterleavedStore(Instruction *Store, Value *Mask,
                                     ShuffleVectorInst *SVI, unsigned Factor,
                                     const APInt &GapMask) const {
    return false;
  }
3337
3338 /// Lower a deinterleave intrinsic to a target specific load intrinsic.
3339 /// Return true on success. Currently only supports
3340 /// llvm.vector.deinterleave{2,3,5,7}
3341 ///
3342 /// \p Load is the accompanying load instruction. Can be either a plain load
3343 /// instruction or a vp.load intrinsic.
3344 /// \p DI represents the deinterleaveN intrinsic.
3345 /// \p GapMask is a mask with zeros for components / fields that may not be
3346 /// accessed.
3348 IntrinsicInst *DI,
3349 const APInt &GapMask) const {
3350 return false;
3351 }
3352
3353 /// Lower an interleave intrinsic to a target specific store intrinsic.
3354 /// Return true on success. Currently only supports
3355 /// llvm.vector.interleave{2,3,5,7}
3356 ///
3357 /// \p Store is the accompanying store instruction. Can be either a plain
3358 /// store or a vp.store intrinsic.
3359 /// \p Mask is a per-segment (i.e. number of lanes equal to that of one
3360 /// component being interwoven) mask. Can be nullptr, in which case the
3361 /// result is uncondiitional.
3362 /// \p InterleaveValues contains the interleaved values.
3363 virtual bool
3365 ArrayRef<Value *> InterleaveValues) const {
3366 return false;
3367 }
3368
  /// Return true if an fpext operation is free (for instance, because
  /// single-precision floating-point numbers are implicitly extended to
  /// double-precision).
  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
           "invalid fpext types");
    // Conservative default: assume the extension always costs an instruction.
    return false;
  }
3377
3378 /// Return true if an fpext operation input to an \p Opcode operation is free
3379 /// (for instance, because half-precision floating-point numbers are
3380 /// implicitly extended to float-precision) for an FMA instruction.
3381 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
3382 LLT DestTy, LLT SrcTy) const {
3383 return false;
3384 }
3385
  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to float-precision) for an FMA instruction.
  virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                               EVT DestVT, EVT SrcVT) const {
    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
           "invalid fpext types");
    // By default foldability simply follows the generic "is this extension
    // free" query; targets may refine this per-opcode.
    return isFPExtFree(DestVT, SrcVT);
  }
3395
3396 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
3397 /// extend node) is profitable.
3398 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
3399
3400 /// Return true if an fneg operation is free to the point where it is never
3401 /// worthwhile to replace it with a bitwise operation.
3402 virtual bool isFNegFree(EVT VT) const {
3403 assert(VT.isFloatingPoint());
3404 return false;
3405 }
3406
3407 /// Return true if an fabs operation is free to the point where it is never
3408 /// worthwhile to replace it with a bitwise operation.
3409 virtual bool isFAbsFree(EVT VT) const {
3410 assert(VT.isFloatingPoint());
3411 return false;
3412 }
3413
3414 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3415 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3416 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3417 ///
3418 /// NOTE: This may be called before legalization on types for which FMAs are
3419 /// not legal, but should return true if those types will eventually legalize
3420 /// to types that support FMAs. After legalization, it will only be called on
3421 /// types that support FMAs (via Legal or Custom actions)
3422 ///
3423 /// Targets that care about soft float support should return false when soft
3424 /// float code is being generated (i.e. use-soft-float).
3426 EVT) const {
3427 return false;
3428 }
3429
3430 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3431 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3432 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3433 ///
3434 /// NOTE: This may be called before legalization on types for which FMAs are
3435 /// not legal, but should return true if those types will eventually legalize
3436 /// to types that support FMAs. After legalization, it will only be called on
3437 /// types that support FMAs (via Legal or Custom actions)
3439 LLT) const {
3440 return false;
3441 }
3442
  /// IR version of the isFMAFasterThanFMulAndFAdd hook, taking the IR
  /// function and type instead of codegen types.
  virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
    return false;
  }
3447
3448 /// Returns true if \p MI can be combined with another instruction to
3449 /// form TargetOpcode::G_FMAD. \p N may be an TargetOpcode::G_FADD,
3450 /// TargetOpcode::G_FSUB, or an TargetOpcode::G_FMUL which will be
3451 /// distributed into an fadd/fsub.
3452 virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3453 assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3454 MI.getOpcode() == TargetOpcode::G_FSUB ||
3455 MI.getOpcode() == TargetOpcode::G_FMUL) &&
3456 "unexpected node in FMAD forming combine");
3457 switch (Ty.getScalarSizeInBits()) {
3458 case 16:
3459 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3460 case 32:
3461 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3462 case 64:
3463 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3464 default:
3465 break;
3466 }
3467
3468 return false;
3469 }
3470
  /// Returns true if \p N can be combined with another node to form an
  /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
  /// will be distributed into an fadd/fsub.
  virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
    assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
            N->getOpcode() == ISD::FMUL) &&
           "unexpected node in FMAD forming combine");
    return isOperationLegal(ISD::FMAD, N->getValueType(0));
  }
3480
3481 // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather
3482 // than FMUL and ADD is delegated to the machine combiner.
3484 CodeGenOptLevel OptLevel) const {
3485 return false;
3486 }
3487
3488 /// Return true if it's profitable to narrow operations of type SrcVT to
3489 /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3490 /// i32 to i16.
3491 virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const {
3492 return false;
3493 }
3494
3495 /// Return true if pulling a binary operation into a select with an identity
3496 /// constant is profitable. This is the inverse of an IR transform.
3497 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3498 virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
3499 unsigned SelectOpcode,
3500 SDValue X,
3501 SDValue Y) const {
3502 return false;
3503 }
3504
3505 /// Return true if it is beneficial to convert a load of a constant to
3506 /// just the constant itself.
3507 /// On some targets it might be more efficient to use a combination of
3508 /// arithmetic instructions to materialize the constant instead of loading it
3509 /// from a constant pool.
3511 Type *Ty) const {
3512 return false;
3513 }
3514
3515 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3516 /// from this source type with this index. This is needed because
3517 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3518 /// the first element, and only the target knows which lowering is cheap.
3519 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3520 unsigned Index) const {
3521 return false;
3522 }
3523
3524 /// Try to convert an extract element of a vector binary operation into an
3525 /// extract element followed by a scalar operation.
3526 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3527 return false;
3528 }
3529
3530 /// Return true if extraction of a scalar element from the given vector type
3531 /// at the given index is cheap. For example, if scalar operations occur on
3532 /// the same register file as vector operations, then an extract element may
3533 /// be a sub-register rename rather than an actual instruction.
3534 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3535 return false;
3536 }
3537
  /// Try to convert math with an overflow comparison into the corresponding DAG
  /// node operation. Targets may want to override this independently of whether
  /// the operation is legal/custom for the given type because it may obscure
  /// matching of other patterns.
  virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                    bool MathUsed) const {
    // Form it if it is legal.
    if (isOperationLegal(Opcode, VT))
      return true;

    // TODO: The default logic is inherited from code in CodeGenPrepare.
    // The opcode should not make a difference by default?
    if (Opcode != ISD::UADDO)
      return false;

    // Allow the transform as long as we have an integer type that is not
    // obviously illegal and unsupported and if the math result is used
    // besides the overflow check. On some targets (e.g. SPARC), it is
    // not profitable to form an overflow op if the math result has no
    // concrete users.
    if (VT.isVector())
      return false;
    return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
  }
3562
3563 // Return true if the target wants to optimize the mul overflow intrinsic
3564 // for the given \p VT.
3566 EVT VT) const {
3567 return false;
3568 }
3569
3570 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3571 // even if the vector itself has multiple uses.
3572 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3573 return false;
3574 }
3575
3576 // Return true if CodeGenPrepare should consider splitting large offset of a
3577 // GEP to make the GEP fit into the addressing mode and can be sunk into the
3578 // same blocks of its users.
3579 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3580
3581 /// Return true if creating a shift of the type by the given
3582 /// amount is not profitable.
3583 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3584 return false;
3585 }
3586
3587 // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3588 // A) where y has a single bit set?
3590 const APInt &AndMask) const {
3591 unsigned ShCt = AndMask.getBitWidth() - 1;
3592 return !shouldAvoidTransformToShift(VT, ShCt);
3593 }
3594
3595 /// Does this target require the clearing of high-order bits in a register
3596 /// passed to the fp16 to fp conversion library function.
3597 virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3598
3599 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3600 /// from min(max(fptoi)) saturation patterns.
3601 virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3602 return isOperationLegalOrCustom(Op, VT);
3603 }
3604
3605 /// Should we prefer selects to doing arithmetic on boolean types
3607 return false;
3608 }
3609
3610 /// True if target has some particular form of dealing with pointer arithmetic
3611 /// semantics for pointers with the given value type. False if pointer
3612 /// arithmetic should not be preserved for passes such as instruction
3613 /// selection, and can fallback to regular arithmetic.
3614 /// This should be removed when PTRADD nodes are widely supported by backends.
3615 virtual bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const {
3616 return false;
3617 }
3618
3619 /// True if the target allows transformations of in-bounds pointer
3620 /// arithmetic that cause out-of-bounds intermediate results.
3622 EVT PtrVT) const {
3623 return false;
3624 }
3625
3626 /// Does this target support complex deinterleaving
3627 virtual bool isComplexDeinterleavingSupported() const { return false; }
3628
3629 /// Does this target support complex deinterleaving with the given operation
3630 /// and type
3633 return false;
3634 }
3635
3636 // Get the preferred opcode for FP_TO_XINT nodes.
3637 // By default, this checks if the provded operation is an illegal FP_TO_UINT
3638 // and if so, checks if FP_TO_SINT is legal or custom for use as a
3639 // replacement. If both UINT and SINT conversions are Custom, we choose SINT
3640 // by default because that's the right thing on PPC.
3641 virtual unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
3642 EVT ToVT) const {
3643 if (isOperationLegal(Op, ToVT))
3644 return Op;
3645 switch (Op) {
3646 case ISD::FP_TO_UINT:
3648 return ISD::FP_TO_SINT;
3649 break;
3653 break;
3654 case ISD::VP_FP_TO_UINT:
3655 if (isOperationLegalOrCustom(ISD::VP_FP_TO_SINT, ToVT))
3656 return ISD::VP_FP_TO_SINT;
3657 break;
3658 default:
3659 break;
3660 }
3661 return Op;
3662 }
3663
3664 /// Create the IR node for the given complex deinterleaving operation.
3665 /// If one cannot be created using all the given inputs, nullptr should be
3666 /// returned.
3669 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3670 Value *Accumulator = nullptr) const {
3671 return nullptr;
3672 }
3673
3675 return RuntimeLibcallInfo;
3676 }
3677
3678 const LibcallLoweringInfo &getLibcallLoweringInfo() const { return Libcalls; }
3679
3680 void setLibcallImpl(RTLIB::Libcall Call, RTLIB::LibcallImpl Impl) {
3681 Libcalls.setLibcallImpl(Call, Impl);
3682 }
3683
3684 /// Get the libcall impl routine name for the specified libcall.
3685 RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const {
3686 return Libcalls.getLibcallImpl(Call);
3687 }
3688
3689 /// Get the libcall routine name for the specified libcall.
3690 // FIXME: This should be removed. Only LibcallImpl should have a name.
3691 const char *getLibcallName(RTLIB::Libcall Call) const {
3692 return Libcalls.getLibcallName(Call);
3693 }
3694
3695 /// Get the libcall routine name for the specified libcall implementation
3699
3700 RTLIB::LibcallImpl getMemcpyImpl() const { return Libcalls.getMemcpyImpl(); }
3701
3702 /// Check if this is valid libcall for the current module, otherwise
3703 /// RTLIB::Unsupported.
3704 RTLIB::LibcallImpl getSupportedLibcallImpl(StringRef FuncName) const {
3705 return RuntimeLibcallInfo.getSupportedLibcallImpl(FuncName);
3706 }
3707
3708 /// Get the comparison predicate that's to be used to test the result of the
3709 /// comparison libcall against zero. This should only be used with
3710 /// floating-point compare libcalls.
3711 ISD::CondCode getSoftFloatCmpLibcallPredicate(RTLIB::LibcallImpl Call) const;
3712
3713 /// Get the CallingConv that should be used for the specified libcall
3714 /// implementation.
3716 return Libcalls.getLibcallImplCallingConv(Call);
3717 }
3718
3719 /// Get the CallingConv that should be used for the specified libcall.
3720 // FIXME: Remove this wrapper and directly use the used LibcallImpl
3722 return Libcalls.getLibcallCallingConv(Call);
3723 }
3724
3725 /// Execute target specific actions to finalize target lowering.
3726 /// This is used to set extra flags in MachineFrameInformation and freezing
3727 /// the set of reserved registers.
3728 /// The default implementation just freezes the set of reserved registers.
3729 virtual void finalizeLowering(MachineFunction &MF) const;
3730
3731 /// Returns true if it's profitable to allow merging store of loads when there
3732 /// are functions calls between the load and the store.
3733 virtual bool shouldMergeStoreOfLoadsOverCall(EVT, EVT) const { return true; }
3734
3735 //===----------------------------------------------------------------------===//
3736 // GlobalISel Hooks
3737 //===----------------------------------------------------------------------===//
3738 /// Check whether or not \p MI needs to be moved close to its uses.
3739 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3740
3741
3742private:
3743 const TargetMachine &TM;
3744
3745 /// Tells the code generator that the target has BitExtract instructions.
3746 /// The code generator will aggressively sink "shift"s into the blocks of
3747 /// their users if the users will generate "and" instructions which can be
3748 /// combined with "shift" to BitExtract instructions.
3749 bool HasExtractBitsInsn;
3750
3751 /// Tells the code generator to bypass slow divide or remainder
3752 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
3753 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3754 /// div/rem when the operands are positive and less than 256.
3755 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
3756
3757 /// Tells the code generator that it shouldn't generate extra flow control
3758 /// instructions and should attempt to combine flow control instructions via
3759 /// predication.
3760 bool JumpIsExpensive;
3761
3762 /// Information about the contents of the high-bits in boolean values held in
3763 /// a type wider than i1. See getBooleanContents.
3764 BooleanContent BooleanContents;
3765
3766 /// Information about the contents of the high-bits in boolean values held in
3767 /// a type wider than i1. See getBooleanContents.
3768 BooleanContent BooleanFloatContents;
3769
3770 /// Information about the contents of the high-bits in boolean vector values
3771 /// when the element type is wider than i1. See getBooleanContents.
3772 BooleanContent BooleanVectorContents;
3773
3774 /// The target scheduling preference: shortest possible total cycles or lowest
3775 /// register usage.
3776 Sched::Preference SchedPreferenceInfo;
3777
3778 /// The minimum alignment that any argument on the stack needs to have.
3779 Align MinStackArgumentAlignment;
3780
3781 /// The minimum function alignment (used when optimizing for size, and to
3782 /// prevent explicitly provided alignment from leading to incorrect code).
3783 Align MinFunctionAlignment;
3784
3785 /// The preferred function alignment (used when alignment unspecified and
3786 /// optimizing for speed).
3787 Align PrefFunctionAlignment;
3788
 3789 /// The preferred loop alignment (in log2, not in bytes).
3790 Align PrefLoopAlignment;
3791 /// The maximum amount of bytes permitted to be emitted for alignment.
3792 unsigned MaxBytesForAlignment;
3793
3794 /// Size in bits of the maximum atomics size the backend supports.
3795 /// Accesses larger than this will be expanded by AtomicExpandPass.
3796 unsigned MaxAtomicSizeInBitsSupported;
3797
3798 /// Size in bits of the maximum div/rem size the backend supports.
3799 /// Larger operations will be expanded by ExpandIRInsts.
3800 unsigned MaxDivRemBitWidthSupported;
3801
3802 /// Size in bits of the maximum fp to/from int conversion size the
3803 /// backend supports. Larger operations will be expanded by
3804 /// ExpandIRInsts.
3805 unsigned MaxLargeFPConvertBitWidthSupported;
3806
3807 /// Size in bits of the minimum cmpxchg or ll/sc operation the
3808 /// backend supports.
3809 unsigned MinCmpXchgSizeInBits;
3810
3811 /// The minimum of largest number of comparisons to use bit test for switch.
3812 unsigned MinimumBitTestCmps;
3813
3814 /// Maximum known-legal store size, which can be guaranteed for scalable
3815 /// vectors.
3816 unsigned MaximumLegalStoreInBits;
3817
3818 /// This indicates if the target supports unaligned atomic operations.
3819 bool SupportsUnalignedAtomics;
3820
3821 /// If set to a physical register, this specifies the register that
3822 /// llvm.savestack/llvm.restorestack should save and restore.
3823 Register StackPointerRegisterToSaveRestore;
3824
3825 /// This indicates the default register class to use for each ValueType the
3826 /// target supports natively.
3827 const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3828 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3829 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3830
3831 /// This indicates the "representative" register class to use for each
3832 /// ValueType the target supports natively. This information is used by the
3833 /// scheduler to track register pressure. By default, the representative
3834 /// register class is the largest legal super-reg register class of the
3835 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3836 /// representative class would be GR32.
3837 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {nullptr};
3838
3839 /// This indicates the "cost" of the "representative" register class for each
3840 /// ValueType. The cost is used by the scheduler to approximate register
3841 /// pressure.
3842 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3843
3844 /// For any value types we are promoting or expanding, this contains the value
3845 /// type that we are changing to. For Expanded types, this contains one step
3846 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3847 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3848 /// the same type (e.g. i32 -> i32).
3849 MVT TransformToType[MVT::VALUETYPE_SIZE];
3850
3851 /// For each operation and each value type, keep a LegalizeAction that
3852 /// indicates how instruction selection should deal with the operation. Most
3853 /// operations are Legal (aka, supported natively by the target), but
3854 /// operations that are not should be described. Note that operations on
3855 /// non-legal value types are not described here.
3856 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3857
3858 /// For each load extension type and each value type, keep a LegalizeAction
3859 /// that indicates how instruction selection should deal with a load of a
3860 /// specific value type and extension type. Uses 4-bits to store the action
3861 /// for each of the 4 load ext types.
3862 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3863
3864 /// Similar to LoadExtActions, but for atomic loads. Only Legal or Expand
3865 /// (default) values are supported.
3866 uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3867
3868 /// For each value type pair keep a LegalizeAction that indicates whether a
3869 /// truncating store of a specific value type and truncating type is legal.
3870 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3871
3872 /// For each indexed mode and each value type, keep a quad of LegalizeAction
3873 /// that indicates how instruction selection should deal with the load /
3874 /// store / maskedload / maskedstore.
3875 ///
3876 /// The first dimension is the value_type for the reference. The second
3877 /// dimension represents the various modes for load store.
3878 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3879
3880 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3881 /// indicates how instruction selection should deal with the condition code.
3882 ///
3883 /// Because each CC action takes up 4 bits, we need to have the array size be
3884 /// large enough to fit all of the value types. This can be done by rounding
3885 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3886 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
3887
3888 using PartialReduceActionTypes =
3889 std::tuple<unsigned, MVT::SimpleValueType, MVT::SimpleValueType>;
3890 /// For each partial reduce opcode, result type and input type combination,
3891 /// keep a LegalizeAction which indicates how instruction selection should
3892 /// deal with this operation.
3893 DenseMap<PartialReduceActionTypes, LegalizeAction> PartialReduceMLAActions;
3894
3895 ValueTypeActionImpl ValueTypeActions;
3896
3897private:
3898 /// Targets can specify ISD nodes that they would like PerformDAGCombine
3899 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3900 /// array.
3901 unsigned char
3902 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3903
3904 /// For operations that must be promoted to a specific type, this holds the
3905 /// destination type. This map should be sparse, so don't hold it as an
3906 /// array.
3907 ///
3908 /// Targets add entries to this map with AddPromotedToType(..), clients access
3909 /// this with getTypeToPromoteTo(..).
3910 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3911 PromoteToType;
3912
3913 /// FIXME: This should not live here; it should come from an analysis.
3914 const RTLIB::RuntimeLibcallsInfo RuntimeLibcallInfo;
3915
3916 /// The list of libcalls that the target will use.
3917 /// FIXME: This should not live here; it should come from an analysis.
3918 LibcallLoweringInfo Libcalls;
3919
3920 /// The bits of IndexedModeActions used to store the legalisation actions
3921 /// We store the data as | ML | MS | L | S | each taking 4 bits.
3922 enum IndexedModeActionsBits {
3923 IMAB_Store = 0,
3924 IMAB_Load = 4,
3925 IMAB_MaskedStore = 8,
3926 IMAB_MaskedLoad = 12
3927 };
3928
  /// Record \p Action for indexed mode \p IdxMode on \p VT at bit position
  /// \p Shift inside the packed IndexedModeActions entry (layout described by
  /// IndexedModeActionsBits; each action occupies 4 bits).
  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    // Clear the existing 4-bit action, then OR in the new one.
    IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
    IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
  }
3937
  /// Extract the 4-bit LegalizeAction stored at bit position \p Shift for
  /// indexed mode \p IdxMode on \p VT (counterpart to setIndexedModeAction).
  LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
                                      unsigned Shift) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
  }
3945
3946protected:
3947 /// Return true if the extension represented by \p I is free.
3948 /// \pre \p I is a sign, zero, or fp extension and
3949 /// is[Z|FP]ExtFree of the related types is not true.
3950 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3951
3952 /// Depth that GatherAllAliases should continue looking for chain
3953 /// dependencies when trying to find a more preferable chain. As an
3954 /// approximation, this should be more than the number of consecutive stores
3955 /// expected to be merged.
3957
3958 /// \brief Specify maximum number of store instructions per memset call.
3959 ///
3960 /// When lowering \@llvm.memset this field specifies the maximum number of
3961 /// store operations that may be substituted for the call to memset. Targets
3962 /// must set this value based on the cost threshold for that target. Targets
3963 /// should assume that the memset will be done using as many of the largest
3964 /// store operations first, followed by smaller ones, if necessary, per
3965 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3966 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3967 /// store. This only applies to setting a constant array of a constant size.
3969 /// Likewise for functions with the OptSize attribute.
3971
3972 /// \brief Specify maximum number of store instructions per memcpy call.
3973 ///
3974 /// When lowering \@llvm.memcpy this field specifies the maximum number of
3975 /// store operations that may be substituted for a call to memcpy. Targets
3976 /// must set this value based on the cost threshold for that target. Targets
3977 /// should assume that the memcpy will be done using as many of the largest
3978 /// store operations first, followed by smaller ones, if necessary, per
3979 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3980 /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store
3981 /// and one 1-byte store. This only applies to copying a constant array of
3982 /// constant size.
3984 /// Likewise for functions with the OptSize attribute.
3986 /// \brief Specify max number of store instructions to glue in inlined memcpy.
3987 ///
3988 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3989 /// of store instructions to keep together. This helps in pairing and
3990 // vectorization later on.
3992
3993 /// \brief Specify maximum number of load instructions per memcmp call.
3994 ///
3995 /// When lowering \@llvm.memcmp this field specifies the maximum number of
3996 /// pairs of load operations that may be substituted for a call to memcmp.
3997 /// Targets must set this value based on the cost threshold for that target.
3998 /// Targets should assume that the memcmp will be done using as many of the
3999 /// largest load operations first, followed by smaller ones, if necessary, per
4000 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
4001 /// with 32-bit alignment would result in one 4-byte load, a one 2-byte load
4002 /// and one 1-byte load. This only applies to copying a constant array of
4003 /// constant size.
4005 /// Likewise for functions with the OptSize attribute.
4007
4008 /// \brief Specify maximum number of store instructions per memmove call.
4009 ///
4010 /// When lowering \@llvm.memmove this field specifies the maximum number of
4011 /// store instructions that may be substituted for a call to memmove. Targets
4012 /// must set this value based on the cost threshold for that target. Targets
4013 /// should assume that the memmove will be done using as many of the largest
4014 /// store operations first, followed by smaller ones, if necessary, per
4015 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
4016 /// with 8-bit alignment would result in nine 1-byte stores. This only
4017 /// applies to copying a constant array of constant size.
4019 /// Likewise for functions with the OptSize attribute.
4021
4022 /// Tells the code generator that select is more expensive than a branch if
4023 /// the branch is usually predicted right.
4025
4026 /// \see enableExtLdPromotion.
4028
4029 /// Return true if the value types that can be represented by the specified
4030 /// register class are all legal.
4031 bool isLegalRC(const TargetRegisterInfo &TRI,
4032 const TargetRegisterClass &RC) const;
4033
 4034 /// Replace/modify any TargetFrameIndex operands with a target-dependent
4035 /// sequence of memory operands that is recognized by PrologEpilogInserter.
4037 MachineBasicBlock *MBB) const;
4038
4040};
4041
4042/// This class defines information used to lower LLVM code to legal SelectionDAG
4043/// operators that the target instruction selector can accept natively.
4044///
4045/// This class also defines callbacks that targets must implement to lower
4046/// target-specific constructs to SelectionDAG operators.
4048public:
4049 struct DAGCombinerInfo;
4050 struct MakeLibCallOptions;
4051
4054
4055 explicit TargetLowering(const TargetMachine &TM,
4056 const TargetSubtargetInfo &STI);
4058
4059 bool isPositionIndependent() const;
4060
4061 // If set to true, SelectionDAG nodes will be consistently processed in
4062 // topological order. This is a temporary hook until sorting can be
4063 // enabled globally.
4064 virtual bool useTopologicalSorting() const { return false; }
4065
4068 UniformityInfo *UA) const {
4069 return false;
4070 }
4071
4072 // Lets target to control the following reassociation of operands: (op (op x,
4073 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
4074 // default consider profitable any case where N0 has single use. This
4075 // behavior reflects the condition replaced by this target hook call in the
4076 // DAGCombiner. Any particular target can implement its own heuristic to
4077 // restrict common combiner.
4079 SDValue N1) const {
4080 return N0.hasOneUse();
4081 }
4082
4083 // Lets the target control the following reassociation of operands: (op (op x,
4084 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
4085 // default consider profitable any case where N0 has single use. This
4086 // behavior reflects the condition replaced by this target hook call in the
4087 // combiner. Any particular target can implement its own heuristic to
4088 // restrict common combiner.
4090 Register N1) const {
4091 return MRI.hasOneNonDBGUse(N0);
4092 }
4093
// Target hook: return true if the value produced by \p N is known to be
// uniform. Conservatively false by default.
// NOTE(review): precise semantics inferred from the name and the sibling
// uniformity hooks above - confirm against callers before relying on this.
4094 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
4095 return false;
4096 }
4097
4098 /// Returns true by value, base pointer and offset pointer and addressing mode
4099 /// by reference if the node's address can be legally represented as
4100 /// pre-indexed load / store address.
/// The default implementation returns false, i.e. no pre-indexed forms are
/// supported unless a target overrides this hook.
4101 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
4102 SDValue &/*Offset*/,
4103 ISD::MemIndexedMode &/*AM*/,
4104 SelectionDAG &/*DAG*/) const {
4105 return false;
4106 }
4107
4108 /// Returns true by value, base pointer and offset pointer and addressing mode
4109 /// by reference if this node can be combined with a load / store to form a
4110 /// post-indexed load / store.
/// The default implementation returns false, i.e. no post-indexed forms are
/// supported unless a target overrides this hook.
4111 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
4112 SDValue &/*Base*/,
4113 SDValue &/*Offset*/,
4114 ISD::MemIndexedMode &/*AM*/,
4115 SelectionDAG &/*DAG*/) const {
4116 return false;
4117 }
4118
4119 /// Returns true if the specified base+offset is a legal indexed addressing
4120 /// mode for this target. \p MI is the load or store instruction that is being
4121 /// considered for transformation.
4123 bool IsPre, MachineRegisterInfo &MRI) const {
4124 return false;
4125 }
4126
4127 /// Return the entry encoding for a jump table in the current function. The
4128 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
4129 virtual unsigned getJumpTableEncoding() const;
4130
/// Return the register type to use for a jump-table value. Defaults to the
/// target's pointer type for the given DataLayout.
4131 virtual MVT getJumpTableRegTy(const DataLayout &DL) const {
4132 return getPointerTy(DL);
4133 }
4134
4135 virtual const MCExpr *
4137 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
4138 MCContext &/*Ctx*/) const {
4139 llvm_unreachable("Need to implement this hook if target has custom JTIs");
4140 }
4141
4142 /// Returns relocation base for the given PIC jumptable.
4143 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
4144 SelectionDAG &DAG) const;
4145
4146 /// This returns the relocation base for the given PIC jumptable, the same as
4147 /// getPICJumpTableRelocBase, but as an MCExpr.
4148 virtual const MCExpr *
4149 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
4150 unsigned JTI, MCContext &Ctx) const;
4151
4152 /// Return true if folding a constant offset with the given GlobalAddress is
4153 /// legal. It is frequently not legal in PIC relocation models.
4154 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
4155
4156 /// On x86, return true if the operand with index OpNo is a CALL or JUMP
4157 /// instruction, which can use either a memory constraint or an address
4158 /// constraint. -fasm-blocks "__asm call foo" lowers to
4159 /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
4160 ///
4161 /// This function is used by a hack to choose the address constraint,
4162 /// lowering to a direct call.
4163 virtual bool
4165 unsigned OpNo) const {
4166 return false;
4167 }
4168
4170 SDValue &Chain) const;
4171
4172 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
4173 SDValue &NewRHS, ISD::CondCode &CCCode,
4174 const SDLoc &DL, const SDValue OldLHS,
4175 const SDValue OldRHS) const;
4176
4177 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
4178 SDValue &NewRHS, ISD::CondCode &CCCode,
4179 const SDLoc &DL, const SDValue OldLHS,
4180 const SDValue OldRHS, SDValue &Chain,
4181 bool IsSignaling = false) const;
4182
4184 SDValue Chain, MachineMemOperand *MMO,
4185 SDValue &NewLoad, SDValue Ptr,
4186 SDValue PassThru, SDValue Mask) const {
4187 llvm_unreachable("Not Implemented");
4188 }
4189
4191 SDValue Chain, MachineMemOperand *MMO,
4192 SDValue Ptr, SDValue Val,
4193 SDValue Mask) const {
4194 llvm_unreachable("Not Implemented");
4195 }
4196
4197 /// Returns a pair of (return value, chain).
4198 /// It is an error to pass RTLIB::Unsupported as \p LibcallImpl
4199 std::pair<SDValue, SDValue>
4200 makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT,
4201 ArrayRef<SDValue> Ops, MakeLibCallOptions CallOptions,
4202 const SDLoc &dl, SDValue Chain = SDValue()) const;
4203
4204 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
/// Convenience overload: resolves \p LC to the target's libcall
/// implementation via getLibcallImpl and forwards to the
/// RTLIB::LibcallImpl overload. Returns a pair of (return value, chain).
4205 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
4206 EVT RetVT, ArrayRef<SDValue> Ops,
4207 MakeLibCallOptions CallOptions,
4208 const SDLoc &dl,
4209 SDValue Chain = SDValue()) const {
4210 return makeLibCall(DAG, getLibcallImpl(LC), RetVT, Ops, CallOptions, dl,
4211 Chain);
4212 }
4213
4214 /// Check whether parameters to a call that are passed in callee saved
4215 /// registers are the same as from the calling function. This needs to be
4216 /// checked for tail call eligibility.
4217 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
4218 const uint32_t *CallerPreservedMask,
4219 const SmallVectorImpl<CCValAssign> &ArgLocs,
4220 const SmallVectorImpl<SDValue> &OutVals) const;
4221
4222 //===--------------------------------------------------------------------===//
4223 // TargetLowering Optimization Methods
4224 //
4225
4226 /// A convenience struct that encapsulates a DAG, and two SDValues for
4227 /// returning information from TargetLowering to its clients that want to
4228 /// combine.
4235
4237 bool LT, bool LO) :
4238 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
4239
4240 bool LegalTypes() const { return LegalTys; }
4241 bool LegalOperations() const { return LegalOps; }
4242
4244 Old = O;
4245 New = N;
4246 return true;
4247 }
4248 };
4249
4250 /// Determines the optimal series of memory ops to replace the memset /
4251 /// memcpy. Return true if the number of memory ops is below the threshold
4252 /// (Limit). Note that this is always the case when Limit is ~0. It returns
4253 /// the types of the sequence of memory ops to perform memset / memcpy by
4254 /// reference. If LargestVT is non-null, the target may set it to the largest
4255 /// EVT that should be used for generating the memset value (e.g., for vector
4256 /// splats). If LargestVT is null or left unchanged, the caller will compute
4257 /// it from MemOps.
4258 virtual bool findOptimalMemOpLowering(LLVMContext &Context,
4259 std::vector<EVT> &MemOps,
4260 unsigned Limit, const MemOp &Op,
4261 unsigned DstAS, unsigned SrcAS,
4262 const AttributeList &FuncAttributes,
4263 EVT *LargestVT = nullptr) const;
4264
4265 /// Check to see if the specified operand of the specified instruction is a
4266 /// constant integer. If so, check to see if there are any bits set in the
4267 /// constant that are not demanded. If so, shrink the constant and return
4268 /// true.
4270 const APInt &DemandedElts,
4271 TargetLoweringOpt &TLO) const;
4272
4273 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
4275 TargetLoweringOpt &TLO) const;
4276
4277 // Target hook to do target-specific const optimization, which is called by
4278 // ShrinkDemandedConstant. This function should return true if the target
4279 // doesn't want ShrinkDemandedConstant to further optimize the constant.
4281 const APInt &DemandedBits,
4282 const APInt &DemandedElts,
4283 TargetLoweringOpt &TLO) const {
4284 return false;
4285 }
4286
4287 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
4288 /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast,
4289 /// but it could be generalized for targets with other types of implicit
4290 /// widening casts.
4291 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
4292 const APInt &DemandedBits,
4293 TargetLoweringOpt &TLO) const;
4294
4295 /// Look at Op. At this point, we know that only the DemandedBits bits of the
4296 /// result of Op are ever used downstream. If we can use this information to
4297 /// simplify Op, create a new simplified DAG node and return true, returning
4298 /// the original and new nodes in Old and New. Otherwise, analyze the
4299 /// expression and return a mask of KnownOne and KnownZero bits for the
4300 /// expression (used to simplify the caller). The KnownZero/One bits may only
4301 /// be accurate for those bits in the Demanded masks.
4302 /// \p AssumeSingleUse When this parameter is true, this function will
4303 /// attempt to simplify \p Op even if there are multiple uses.
4304 /// Callers are responsible for correctly updating the DAG based on the
4305 /// results of this function, because simply replacing TLO.Old
4306 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4307 /// has multiple uses.
4308 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4309 const APInt &DemandedElts, KnownBits &Known,
4310 TargetLoweringOpt &TLO, unsigned Depth = 0,
4311 bool AssumeSingleUse = false) const;
4312
4313 /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
4314 /// Adds Op back to the worklist upon success.
4315 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4316 KnownBits &Known, TargetLoweringOpt &TLO,
4317 unsigned Depth = 0,
4318 bool AssumeSingleUse = false) const;
4319
4320 /// Helper wrapper around SimplifyDemandedBits.
4321 /// Adds Op back to the worklist upon success.
4322 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4323 DAGCombinerInfo &DCI) const;
4324
4325 /// Helper wrapper around SimplifyDemandedBits.
4326 /// Adds Op back to the worklist upon success.
4327 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4328 const APInt &DemandedElts,
4329 DAGCombinerInfo &DCI) const;
4330
4331 /// More limited version of SimplifyDemandedBits that can be used to "look
4332 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4333 /// bitwise ops etc.
4334 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4335 const APInt &DemandedElts,
4336 SelectionDAG &DAG,
4337 unsigned Depth = 0) const;
4338
4339 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4340 /// elements.
4341 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4342 SelectionDAG &DAG,
4343 unsigned Depth = 0) const;
4344
4345 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4346 /// bits from only some vector elements.
4347 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
4348 const APInt &DemandedElts,
4349 SelectionDAG &DAG,
4350 unsigned Depth = 0) const;
4351
4352 /// Look at Vector Op. At this point, we know that only the DemandedElts
4353 /// elements of the result of Op are ever used downstream. If we can use
4354 /// this information to simplify Op, create a new simplified DAG node and
4355 /// return true, storing the original and new nodes in TLO.
4356 /// Otherwise, analyze the expression and return a mask of KnownUndef and
4357 /// KnownZero elements for the expression (used to simplify the caller).
4358 /// The KnownUndef/Zero elements may only be accurate for those bits
4359 /// in the DemandedMask.
4360 /// \p AssumeSingleUse When this parameter is true, this function will
4361 /// attempt to simplify \p Op even if there are multiple uses.
4362 /// Callers are responsible for correctly updating the DAG based on the
4363 /// results of this function, because simply replacing TLO.Old
4364 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4365 /// has multiple uses.
4366 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
4367 APInt &KnownUndef, APInt &KnownZero,
4368 TargetLoweringOpt &TLO, unsigned Depth = 0,
4369 bool AssumeSingleUse = false) const;
4370
4371 /// Helper wrapper around SimplifyDemandedVectorElts.
4372 /// Adds Op back to the worklist upon success.
4373 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
4374 DAGCombinerInfo &DCI) const;
4375
4376 /// Return true if the target supports simplifying demanded vector elements by
4377 /// converting them to undefs.
4378 virtual bool
4380 const TargetLoweringOpt &TLO) const {
4381 return true;
4382 }
4383
4384 /// Determine which of the bits specified in Mask are known to be either zero
4385 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4386 /// argument allows us to only collect the known bits that are shared by the
4387 /// requested vector elements.
4388 virtual void computeKnownBitsForTargetNode(const SDValue Op,
4389 KnownBits &Known,
4390 const APInt &DemandedElts,
4391 const SelectionDAG &DAG,
4392 unsigned Depth = 0) const;
4393
4394 /// Determine which of the bits specified in Mask are known to be either zero
4395 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4396 /// argument allows us to only collect the known bits that are shared by the
4397 /// requested vector elements. This is for GISel.
4398 virtual void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis,
4399 Register R, KnownBits &Known,
4400 const APInt &DemandedElts,
4401 const MachineRegisterInfo &MRI,
4402 unsigned Depth = 0) const;
4403
4404 virtual void computeKnownFPClassForTargetInstr(GISelValueTracking &Analysis,
4405 Register R,
4406 KnownFPClass &Known,
4407 const APInt &DemandedElts,
4408 const MachineRegisterInfo &MRI,
4409 unsigned Depth = 0) const;
4410
4411 /// Determine the known alignment for the pointer value \p R. This can
4412 /// typically be inferred from the number of low known 0 bits. However, for a
4413 /// pointer with a non-integral address space, the alignment value may be
4414 /// independent from the known low bits.
4415 virtual Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis,
4416 Register R,
4417 const MachineRegisterInfo &MRI,
4418 unsigned Depth = 0) const;
4419
4420 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
4421 /// Default implementation computes low bits based on alignment
4422 /// information. This should preserve known bits passed into it.
4423 virtual void computeKnownBitsForFrameIndex(int FIOp,
4424 KnownBits &Known,
4425 const MachineFunction &MF) const;
4426
4427 /// This method can be implemented by targets that want to expose additional
4428 /// information about sign bits to the DAG Combiner. The DemandedElts
4429 /// argument allows us to only collect the minimum sign bits that are shared
4430 /// by the requested vector elements.
4431 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
4432 const APInt &DemandedElts,
4433 const SelectionDAG &DAG,
4434 unsigned Depth = 0) const;
4435
4436 /// This method can be implemented by targets that want to expose additional
4437 /// information about sign bits to GlobalISel combiners. The DemandedElts
4438 /// argument allows us to only collect the minimum sign bits that are shared
4439 /// by the requested vector elements.
4440 virtual unsigned computeNumSignBitsForTargetInstr(
4441 GISelValueTracking &Analysis, Register R, const APInt &DemandedElts,
4442 const MachineRegisterInfo &MRI, unsigned Depth = 0) const;
4443
4444 /// Attempt to simplify any target nodes based on the demanded vector
4445 /// elements, returning true on success. Otherwise, analyze the expression and
4446 /// return a mask of KnownUndef and KnownZero elements for the expression
4447 /// (used to simplify the caller). The KnownUndef/Zero elements may only be
4448 /// accurate for those bits in the DemandedMask.
4449 virtual bool SimplifyDemandedVectorEltsForTargetNode(
4450 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
4451 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
4452
4453 /// Attempt to simplify any target nodes based on the demanded bits/elts,
4454 /// returning true on success. Otherwise, analyze the
4455 /// expression and return a mask of KnownOne and KnownZero bits for the
4456 /// expression (used to simplify the caller). The KnownZero/One bits may only
4457 /// be accurate for those bits in the Demanded masks.
4458 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
4459 const APInt &DemandedBits,
4460 const APInt &DemandedElts,
4461 KnownBits &Known,
4462 TargetLoweringOpt &TLO,
4463 unsigned Depth = 0) const;
4464
4465 /// More limited version of SimplifyDemandedBits that can be used to "look
4466 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4467 /// bitwise ops etc.
4468 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
4469 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
4470 SelectionDAG &DAG, unsigned Depth) const;
4471
4472 /// Return true if this function can prove that \p Op is never poison
4473 /// and \p Kind can be used to track poison and/or undef bits. The
4474 /// DemandedElts argument limits the check to the requested vector elements.
4475 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4476 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4477 UndefPoisonKind Kind, unsigned Depth) const;
4478
4479 /// Return true if Op can create undef or poison from non-undef & non-poison
4480 /// operands. The DemandedElts argument limits the check to the requested
4481 /// vector elements.
4482 virtual bool canCreateUndefOrPoisonForTargetNode(
4483 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4484 UndefPoisonKind Kind, bool ConsiderFlags, unsigned Depth) const;
4485
4486 /// Tries to build a legal vector shuffle using the provided parameters
4487 /// or equivalent variations. The Mask argument may be modified as the
4488 /// function tries different variations.
4489 /// Returns an empty SDValue if the operation fails.
4490 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
4492 SelectionDAG &DAG) const;
4493
4494 /// This method returns the constant pool value that will be loaded by LD.
4495 /// NOTE: You must check for implicit extensions of the constant by LD.
4496 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
4497
4498 /// Determine floating-point class information for a target node. The
4499 /// DemandedElts argument allows us to only collect the known FP classes
4500 /// that are shared by the requested vector elements.
4501 virtual void computeKnownFPClassForTargetNode(const SDValue Op,
4502 KnownFPClass &Known,
4503 const APInt &DemandedElts,
4504 const SelectionDAG &DAG,
4505 unsigned Depth = 0) const;
4506
4507 /// If \p SNaN is false, \returns true if \p Op is known to never be any
4508 /// NaN. If \p SNaN is true, returns if \p Op is known to never be a signaling
4509 /// NaN.
4510 virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
4511 const APInt &DemandedElts,
4512 const SelectionDAG &DAG,
4513 bool SNaN = false,
4514 unsigned Depth = 0) const;
4515
4516 /// Return true if vector \p Op has the same value across all \p DemandedElts,
4517 /// indicating any elements which may be undef in the output \p UndefElts.
4518 virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
4519 APInt &UndefElts,
4520 const SelectionDAG &DAG,
4521 unsigned Depth = 0) const;
4522
4523 /// Returns true if the given Opc is considered a canonical constant for the
4524 /// target, which should not be transformed back into a BUILD_VECTOR.
4526 return Op.getOpcode() == ISD::SPLAT_VECTOR ||
4527 Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
4528 }
4529
4530 /// Return true if the given select/vselect should be considered canonical and
4531 /// not be transformed. Currently only used for "vselect (not Cond), N1, N2 ->
4532 /// vselect Cond, N2, N1".
/// Default: no selects are treated as target-canonical.
4533 virtual bool isTargetCanonicalSelect(SDNode *N) const { return false; }
4534
4536 void *DC; // The DAG Combiner object.
4539
4540 public:
4542
4543 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
4544 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
4545
4546 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
4548 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
4551
4552 LLVM_ABI void AddToWorklist(SDNode *N);
4553 LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To,
4554 bool AddTo = true);
4555 LLVM_ABI SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
4556 LLVM_ABI SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
4557 bool AddTo = true);
4558
4559 LLVM_ABI bool recursivelyDeleteUnusedNodes(SDNode *N);
4560
4561 LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
4562 };
4563
4564 /// Return if the N is a constant or constant vector equal to the true value
4565 /// from getBooleanContents().
4566 bool isConstTrueVal(SDValue N) const;
4567
4568 /// Return if the N is a constant or constant vector equal to the false value
4569 /// from getBooleanContents().
4570 bool isConstFalseVal(SDValue N) const;
4571
4572 /// Return if \p N is a True value when extended to \p VT.
4573 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
4574
4575 /// Try to simplify a setcc built with the specified operands and cc. If it is
4576 /// unable to simplify it, return a null SDValue.
4577 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
4578 bool foldBooleans, DAGCombinerInfo &DCI,
4579 const SDLoc &dl) const;
4580
4581 // For targets which wrap address, unwrap for analysis.
// The default assumes addresses are never wrapped and returns \p N unchanged.
4582 virtual SDValue unwrapAddress(SDValue N) const { return N; }
4583
4584 /// Returns true (and the GlobalValue and the offset) if the node is a
4585 /// GlobalAddress + offset.
4586 virtual bool
4587 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
4588
4589 /// This method will be invoked for all target nodes and for any
4590 /// target-independent nodes that the target has registered with invoke it
4591 /// for.
4592 ///
4593 /// The semantics are as follows:
4594 /// Return Value:
4595 /// SDValue.Val == 0 - No change was made
4596 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
4597 /// otherwise - N should be replaced by the returned Operand.
4598 ///
4599 /// In addition, methods provided by DAGCombinerInfo may be used to perform
4600 /// more complex transformations.
4601 ///
4602 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
4603
4604 /// Return true if it is profitable to move this shift by a constant amount
4605 /// through its operand, adjusting any immediate operands as necessary to
4606 /// preserve semantics. This transformation may not be desirable if it
4607 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
4608 /// extraction in AArch64). By default, it returns true.
4609 ///
4610 /// @param N the shift node
4611 /// @param Level the current DAGCombine legalization level.
4613 CombineLevel Level) const {
4614 SDValue ShiftLHS = N->getOperand(0);
4615 if (!ShiftLHS->hasOneUse())
4616 return false;
4617 if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
4618 !ShiftLHS.getOperand(0)->hasOneUse())
4619 return false;
4620 return true;
4621 }
4622
4623 /// GlobalISel - return true if it is profitable to move this shift by a
4624 /// constant amount through its operand, adjusting any immediate operands as
4625 /// necessary to preserve semantics. This transformation may not be desirable
4626 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4627 /// bitfield extraction in AArch64). By default, it returns true.
4628 ///
4629 /// @param MI the shift instruction
4630 /// @param IsAfterLegal true if running after legalization.
4632 bool IsAfterLegal) const {
4633 return true;
4634 }
4635
4636 /// GlobalISel - return true if it's profitable to perform the combine:
4637 /// shl ([sza]ext x), y => zext (shl x, y)
/// Default: the combine is always considered profitable.
4638 virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
4639 return true;
4640 }
4641
4642 // Return AndOrSETCCFoldKind::{AddAnd, ABS} if its desirable to try and
4643 // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
4644 // writing this) is:
4645 // With C as a power of 2 and C != 0 and C != INT_MIN:
4646 // AddAnd:
4647 // (icmp eq A, C) | (icmp eq A, -C)
4648 // -> (icmp eq and(add(A, C), ~(C + C)), 0)
4649 // (icmp ne A, C) &amp; (icmp ne A, -C)
4650 // -> (icmp ne and(add(A, C), ~(C + C)), 0)
4651 // ABS:
4652 // (icmp eq A, C) | (icmp eq A, -C)
4653 // -> (icmp eq Abs(A), C)
4654 // (icmp ne A, C) &amp; (icmp ne A, -C)
4655 // -> (icmp ne Abs(A), C)
4656 //
4657 // @param LogicOp the logic op
4658 // @param SETCC0 the first of the SETCC nodes
4659 // @param SETCC1 the second of the SETCC nodes
4661 const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
4663 }
4664
4665 /// Return true if it is profitable to combine an XOR of a logical shift
4666 /// to create a logical shift of NOT. This transformation may not be desirable
4667 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4668 /// BIC on ARM/AArch64). By default, it returns true.
4669 virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
4670 return true;
4671 }
4672
4673 /// Return true if the target has native support for the specified value type
4674 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
4675 /// i16 is legal, but undesirable since i16 instruction encodings are longer
4676 /// and some i16 instructions are slow.
4677 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4678 // By default, assume all legal types are desirable.
4679 return isTypeLegal(VT);
4680 }
4681
4682 /// Return true if it is profitable for dag combiner to transform a floating
4683 /// point op of specified opcode to an equivalent op of an integer
4684 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
/// Default: the transformation is not considered profitable.
4685 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
4686 EVT /*VT*/) const {
4687 return false;
4688 }
4689
4690 /// This method queries the target whether it is beneficial for dag combiner to
4691 /// promote the specified node. If true, it should return the desired
4692 /// promotion type by reference.
/// Default: promotion is not considered beneficial.
4693 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
4694 return false;
4695 }
4696
4697 /// Return true if the target supports swifterror attribute. It optimizes
4698 /// loads and stores to reading and writing a specific register.
4699 virtual bool supportSwiftError() const {
4700 return false;
4701 }
4702
4703 /// Return true if the target supports that a subset of CSRs for the given
4704 /// machine function is handled explicitly via copies.
4705 virtual bool supportSplitCSR(MachineFunction *MF) const {
4706 return false;
4707 }
4708
4709 /// Return true if the target supports kcfi operand bundles.
4710 virtual bool supportKCFIBundles() const { return false; }
4711
4712 /// Return true if the target supports ptrauth operand bundles.
4713 virtual bool supportPtrAuthBundles() const { return false; }
4714
4715 /// Perform necessary initialization to handle a subset of CSRs explicitly
4716 /// via copies. This function is called at the beginning of instruction
4717 /// selection.
/// The default implementation aborts; presumably this path is only reached
/// when a target reports split-CSR support - confirm against callers.
4718 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
4719 llvm_unreachable("Not Implemented");
4720 }
4721
4722 /// Insert explicit copies in entry and exit blocks. We copy a subset of
4723 /// CSRs to virtual registers in the entry block, and copy them back to
4724 /// physical registers in the exit blocks. This function is called at the end
4725 /// of instruction selection.
4727 MachineBasicBlock *Entry,
4728 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
4729 llvm_unreachable("Not Implemented");
4730 }
4731
4732 /// Return the newly negated expression if the cost is not expensive and
4733 /// set the cost in \p Cost to indicate that if it is cheaper or neutral to
4734 /// do the negation.
4735 virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
4736 bool LegalOps, bool OptForSize,
4737 NegatibleCost &Cost,
4738 unsigned Depth = 0) const;
4739
4741 SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
4743 unsigned Depth = 0) const {
4745 SDValue Neg =
4746 getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4747 if (!Neg)
4748 return SDValue();
4749
4750 if (Cost <= CostThreshold)
4751 return Neg;
4752
4753 // Remove the newly created node to avoid the side effect to the DAG.
4754 if (Neg->use_empty())
4755 DAG.RemoveDeadNode(Neg.getNode());
4756 return SDValue();
4757 }
4758
4759 /// This is the helper function to return the newly negated expression only
4760 /// when the cost is cheaper.
4762 bool LegalOps, bool OptForSize,
4763 unsigned Depth = 0) const {
4764 return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
4766 }
4767
4768 /// This is the helper function to return the newly negated expression if
4769 /// the cost is not expensive.
4771 bool OptForSize, unsigned Depth = 0) const {
4773 return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4774 }
4775
4776 //===--------------------------------------------------------------------===//
4777 // Lowering methods - These methods must be implemented by targets so that
4778 // the SelectionDAGBuilder code knows how to lower these.
4779 //
4780
4781 /// Target-specific splitting of values into parts that fit a register
4782 /// storing a legal type
4784 SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4785 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
4786 return false;
4787 }
4788
4789 /// Target-specific combining of register parts into its original value
4790 virtual SDValue
4792 const SDValue *Parts, unsigned NumParts,
4793 MVT PartVT, EVT ValueVT,
4794 std::optional<CallingConv::ID> CC) const {
4795 return SDValue();
4796 }
4797
4798 /// This hook must be implemented to lower the incoming (formal) arguments,
4799 /// described by the Ins array, into the specified DAG. The implementation
4800 /// should fill in the InVals array with legal-type argument values, and
4801 /// return the resulting token chain value.
4803 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
4804 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
4805 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
4806 llvm_unreachable("Not Implemented");
4807 }
4808
4809 /// Optional target hook to add target-specific actions when entering EH pad
4810 /// blocks. The implementation should return the resulting token chain value.
/// The default returns a null SDValue, i.e. no extra actions are emitted.
4811 virtual SDValue lowerEHPadEntry(SDValue Chain, const SDLoc &DL,
4812 SelectionDAG &DAG) const {
4813 return SDValue();
4814 }
4815
/// Optional hook for targets to attach target/module-specific attributes to
/// the arguments of a libcall. The default implementation does nothing.
4816 virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
4817 ArgListTy &Args) const {}
4818
4819 /// This structure contains the information necessary for lowering
4820 /// pointer-authenticating indirect calls. It is equivalent to the "ptrauth"
4821 /// operand bundle found on the call instruction, if any.
4826
4827 /// This structure contains all information that is necessary for lowering
4828 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
4829 /// needs to lower a call, and targets will see this struct in their LowerCall
4830 /// implementation.
4833 /// Original unlegalized return type.
4834 Type *OrigRetTy = nullptr;
4835 /// Same as OrigRetTy, or partially legalized for soft float libcalls.
4836 Type *RetTy = nullptr;
4837 bool RetSExt : 1;
4838 bool RetZExt : 1;
4839 bool IsVarArg : 1;
4840 bool IsInReg : 1;
4846 bool NoMerge : 1;
4847
4848 // IsTailCall should be modified by implementations of
4849 // TargetLowering::LowerCall that perform tail call conversions.
4850 bool IsTailCall = false;
4851
4852 // Is Call lowering done post SelectionDAG type legalization.
4854
4855 unsigned NumFixedArgs = -1;
4861 const CallBase *CB = nullptr;
4866 const ConstantInt *CFIType = nullptr;
4869
4870 std::optional<PtrAuthInfo> PAI;
4871
4877
4879 DL = dl;
4880 return *this;
4881 }
4882
4884 Chain = InChain;
4885 return *this;
4886 }
4887
4888 // setCallee with target/module-specific attributes
4890 SDValue Target, ArgListTy &&ArgsList) {
4891 return setLibCallee(CC, ResultType, ResultType, Target,
4892 std::move(ArgsList));
4893 }
4894
4896 Type *OrigResultType, SDValue Target,
4897 ArgListTy &&ArgsList) {
4898 OrigRetTy = OrigResultType;
4899 RetTy = ResultType;
4900 Callee = Target;
4901 CallConv = CC;
4902 NumFixedArgs = ArgsList.size();
4903 Args = std::move(ArgsList);
4904
4905 DAG.getTargetLoweringInfo().markLibCallAttributes(
4906 &(DAG.getMachineFunction()), CC, Args);
4907 return *this;
4908 }
4909
4911 SDValue Target, ArgListTy &&ArgsList,
4912 AttributeSet ResultAttrs = {}) {
4913 RetTy = OrigRetTy = ResultType;
4914 IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
4915 RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
4916 RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
4917 NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge);
4918
4919 Callee = Target;
4920 CallConv = CC;
4921 NumFixedArgs = ArgsList.size();
4922 Args = std::move(ArgsList);
4923 return *this;
4924 }
4925
4927 SDValue Target, ArgListTy &&ArgsList,
4928 const CallBase &Call) {
4929 RetTy = OrigRetTy = ResultType;
4930
4931 IsInReg = Call.hasRetAttr(Attribute::InReg);
4933 Call.doesNotReturn() ||
4934 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
4935 IsVarArg = FTy->isVarArg();
4936 IsReturnValueUsed = !Call.use_empty();
4937 RetSExt = Call.hasRetAttr(Attribute::SExt);
4938 RetZExt = Call.hasRetAttr(Attribute::ZExt);
4939 NoMerge = Call.hasFnAttr(Attribute::NoMerge);
4940
4941 Callee = Target;
4942
4943 CallConv = Call.getCallingConv();
4944 NumFixedArgs = FTy->getNumParams();
4945 Args = std::move(ArgsList);
4946
4947 CB = &Call;
4948
4949 return *this;
4950 }
4951
4953 IsInReg = Value;
4954 return *this;
4955 }
4956
4959 return *this;
4960 }
4961
4963 IsVarArg = Value;
4964 return *this;
4965 }
4966
4968 IsTailCall = Value;
4969 return *this;
4970 }
4971
4974 return *this;
4975 }
4976
4979 return *this;
4980 }
4981
4983 RetSExt = Value;
4984 return *this;
4985 }
4986
4988 RetZExt = Value;
4989 return *this;
4990 }
4991
4994 return *this;
4995 }
4996
4999 return *this;
5000 }
5001
5003 PAI = Value;
5004 return *this;
5005 }
5006
5009 return *this;
5010 }
5011
5013 CFIType = Type;
5014 return *this;
5015 }
5016
5019 return *this;
5020 }
5021
5023 DeactivationSymbol = Sym;
5024 return *this;
5025 }
5026
5028 return Args;
5029 }
5030 };
5031
5032 /// This structure is used to pass arguments to makeLibCall function.
    // By passing the type list from before softening to makeLibCall, the
    // target hook shouldExtendTypeInLibCall can see the original types prior
    // to softening.
5039
5040 bool IsSigned : 1;
5044 bool IsSoften : 1;
5045
5049
5051 IsSigned = Value;
5052 return *this;
5053 }
5054
5057 return *this;
5058 }
5059
5062 return *this;
5063 }
5064
5067 return *this;
5068 }
5069
5071 OpsVTBeforeSoften = OpsVT;
5072 RetVTBeforeSoften = RetVT;
5073 IsSoften = true;
5074 return *this;
5075 }
5076
5077 /// Override the argument type for an operand. Leave the type as null to use
5078 /// the type from the operand's node.
5080 OpsTypeOverrides = OpsTypes;
5081 return *this;
5082 }
5083 };
5084
5085 /// This function lowers an abstract call to a function into an actual call.
5086 /// This returns a pair of operands. The first element is the return value
5087 /// for the function (if RetTy is not VoidTy). The second element is the
5088 /// outgoing token chain. It calls LowerCall to do the actual lowering.
5089 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
5090
5091 /// This hook must be implemented to lower calls into the specified
5092 /// DAG. The outgoing arguments to the call are described by the Outs array,
5093 /// and the values to be returned by the call are described by the Ins
5094 /// array. The implementation should fill in the InVals array with legal-type
5095 /// return values from the call, and return the resulting token chain value.
5096 virtual SDValue
5098 SmallVectorImpl<SDValue> &/*InVals*/) const {
5099 llvm_unreachable("Not Implemented");
5100 }
5101
  /// Target-specific cleanup for formal ByVal parameters. The default
  /// implementation performs no cleanup.
  virtual void HandleByVal(CCState *, unsigned &, Align) const {}
5104
  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  /// \p RetTy is the function's return type.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/, const Type *RetTy) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }
5116
  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  ///
  /// The default implementation aborts; any target that reaches return
  /// lowering through SelectionDAG must override it.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }
5128
  /// Return true if result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time. The default conservatively answers false.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }
5137
  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization. The default conservatively answers false.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }
5144
  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail with a fatal error.
  virtual Register getRegisterByName(const char* RegName, LLT Ty,
                                     const MachineFunction &MF) const {
    report_fatal_error("Named registers not implemented for this target");
  }
5152
5153 /// Return the type that should be used to zero or sign extend a
5154 /// zeroext/signext integer return value. FIXME: Some C calling conventions
5155 /// require the return type to be promoted, but this is not true all the time,
5156 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
5157 /// conventions. The frontend should handle this and include all of the
5158 /// necessary information.
5160 ISD::NodeType /*ExtendKind*/) const {
5161 EVT MinVT = getRegisterType(MVT::i32);
5162 return VT.bitsLT(MinVT) ? MinVT : VT;
5163 }
5164
5165 /// For some targets, an LLVM struct type must be broken down into multiple
5166 /// simple types, but the calling convention specifies that the entire struct
5167 /// must be passed in a block of consecutive registers.
5168 virtual bool
5170 bool isVarArg,
5171 const DataLayout &DL) const {
5172 return false;
5173 }
5174
5175 /// For most targets, an LLVM type must be broken down into multiple
5176 /// smaller types. Usually the halves are ordered according to the endianness
5177 /// but for some platform that would break. So this method will default to
5178 /// matching the endianness but can be overridden.
5179 virtual bool
5181 return DL.isLittleEndian();
5182 }
5183
5184 /// Returns a 0 terminated array of registers that can be safely used as
5185 /// scratch registers.
5187 return nullptr;
5188 }
5189
5190 /// Returns a 0 terminated array of rounding control registers that can be
5191 /// attached into strict FP call.
5195
5196 /// This callback is used to prepare for a volatile or atomic load.
5197 /// It takes a chain node as input and returns the chain for the load itself.
5198 ///
5199 /// Having a callback like this is necessary for targets like SystemZ,
5200 /// which allows a CPU to reuse the result of a previous load indefinitely,
5201 /// even if a cache-coherent store is performed by another CPU. The default
5202 /// implementation does nothing.
5204 SelectionDAG &DAG) const {
5205 return Chain;
5206 }
5207
5208 /// This callback is invoked by the type legalizer to legalize nodes with an
5209 /// illegal operand type but legal result types. It replaces the
5210 /// LowerOperation callback in the type Legalizer. The reason we can not do
5211 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
5212 /// use this callback.
5213 ///
5214 /// TODO: Consider merging with ReplaceNodeResults.
5215 ///
5216 /// The target places new result values for the node in Results (their number
5217 /// and types must exactly match those of the original return values of
5218 /// the node), or leaves Results empty, which indicates that the node is not
5219 /// to be custom lowered after all.
5220 /// The default implementation calls LowerOperation.
5221 virtual void LowerOperationWrapper(SDNode *N,
5223 SelectionDAG &DAG) const;
5224
5225 /// This callback is invoked for operations that are unsupported by the
5226 /// target, which are registered to use 'custom' lowering, and whose defined
5227 /// values are all legal. If the target has no operations that require custom
5228 /// lowering, it need not implement this. The default implementation of this
5229 /// aborts.
5230 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
5231
  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type. The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// This is the result-type counterpart of LowerOperationWrapper, which
  /// handles illegal operand types.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this. The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }
5246
5247 /// This method returns the name of a target specific DAG node.
5248 virtual const char *getTargetNodeName(unsigned Opcode) const;
5249
5250 /// This method returns a target specific FastISel object, or null if the
5251 /// target does not support "fast" ISel.
5253 const TargetLibraryInfo *,
5254 const LibcallLoweringInfo *) const {
5255 return nullptr;
5256 }
5257
5258 //===--------------------------------------------------------------------===//
5259 // Inline Asm Support hooks
5260 //
5261
5263 C_Register, // Constraint represents specific register(s).
5264 C_RegisterClass, // Constraint represents any of register(s) in class.
5265 C_Memory, // Memory constraint.
5266 C_Address, // Address constraint.
5267 C_Immediate, // Requires an immediate.
5268 C_Other, // Something else.
5269 C_Unknown // Unsupported constraint.
5270 };
5271
5273 // Generic weights.
5274 CW_Invalid = -1, // No match.
5275 CW_Okay = 0, // Acceptable.
5276 CW_Good = 1, // Good weight.
5277 CW_Better = 2, // Better weight.
5278 CW_Best = 3, // Best weight.
5279
5280 // Well-known weights.
5281 CW_SpecificReg = CW_Okay, // Specific register operands.
5282 CW_Register = CW_Good, // Register operands.
5283 CW_Memory = CW_Better, // Memory operands.
5284 CW_Constant = CW_Best, // Constant operand.
5285 CW_Default = CW_Okay // Default or don't know type.
5286 };
5287
5288 /// This contains information for each constraint that we are lowering.
5290 /// This contains the actual string for the code, like "m". TargetLowering
5291 /// picks the 'best' code from ConstraintInfo::Codes that most closely
5292 /// matches the operand.
5293 std::string ConstraintCode;
5294
5295 /// Information about the constraint code, e.g. Register, RegisterClass,
5296 /// Memory, Other, Unknown.
5298
5299 /// If this is the result output operand or a clobber, this is null,
5300 /// otherwise it is the incoming operand to the CallInst. This gets
5301 /// modified as the asm is processed.
5303
5304 /// The ValueType for the operand value.
5305 MVT ConstraintVT = MVT::Other;
5306
5307 /// Copy constructor for copying from a ConstraintInfo.
5310
5311 /// Return true of this is an input operand that is a matching constraint
5312 /// like "4".
5313 LLVM_ABI bool isMatchingInputConstraint() const;
5314
5315 /// If this is an input matching constraint, this method returns the output
5316 /// operand it matches.
5317 LLVM_ABI unsigned getMatchedOperand() const;
5318 };
5319
5320 using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
5321
5322 /// Split up the constraint string from the inline assembly value into the
5323 /// specific constraints and their prefixes, and also tie in the associated
5324 /// operand values. If this returns an empty vector, and if the constraint
5325 /// string itself isn't empty, there was an error parsing.
5327 const TargetRegisterInfo *TRI,
5328 const CallBase &Call) const;
5329
5330 /// Examine constraint type and operand type and determine a weight value.
5331 /// The operand object must already have been set up with the operand type.
5333 AsmOperandInfo &info, int maIndex) const;
5334
5335 /// Examine constraint string and operand type and determine a weight value.
5336 /// The operand object must already have been set up with the operand type.
5338 AsmOperandInfo &info, const char *constraint) const;
5339
5340 /// Determines the constraint code and constraint type to use for the specific
5341 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
5342 /// If the actual operand being passed in is available, it can be passed in as
5343 /// Op, otherwise an empty SDValue can be passed.
5344 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
5345 SDValue Op,
5346 SelectionDAG *DAG = nullptr) const;
5347
5348 /// Given a constraint, return the type of constraint it is for this target.
5349 virtual ConstraintType getConstraintType(StringRef Constraint) const;
5350
5351 using ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>;
5353 /// Given an OpInfo with list of constraints codes as strings, return a
5354 /// sorted Vector of pairs of constraint codes and their types in priority of
5355 /// what we'd prefer to lower them as. This may contain immediates that
5356 /// cannot be lowered, but it is meant to be a machine agnostic order of
5357 /// preferences.
5359
5360 /// Given a physical register constraint (e.g. {edx}), return the register
5361 /// number and the register class for the register.
5362 ///
5363 /// Given a register class constraint, like 'r', if this corresponds directly
5364 /// to an LLVM register class, return a register of 0 and the register class
5365 /// pointer.
5366 ///
5367 /// This should only be used for C_Register constraints. On error, this
5368 /// returns a register number of 0 and a null register class pointer.
5369 virtual std::pair<unsigned, const TargetRegisterClass *>
5371 StringRef Constraint, MVT VT) const;
5372
5374 getInlineAsmMemConstraint(StringRef ConstraintCode) const {
5375 if (ConstraintCode == "m")
5377 if (ConstraintCode == "o")
5379 if (ConstraintCode == "X")
5381 if (ConstraintCode == "p")
5384 }
5385
5386 /// Try to replace an X constraint, which matches anything, with another that
5387 /// has more specific requirements based on the type of the corresponding
5388 /// operand. This returns null if there is no replacement to make.
5389 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
5390
5391 /// Lower the specified operand into the Ops vector. If it is invalid, don't
5392 /// add anything to Ops.
5393 virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
5394 std::vector<SDValue> &Ops,
5395 SelectionDAG &DAG) const;
5396
5397 // Lower custom output constraints. If invalid, return SDValue().
5398 virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
5399 const SDLoc &DL,
5400 const AsmOperandInfo &OpInfo,
5401 SelectionDAG &DAG) const;
5402
5403 // Targets may override this function to collect operands from the CallInst
5404 // and for example, lower them into the SelectionDAG operands.
5405 virtual void CollectTargetIntrinsicOperands(const CallInst &I,
5407 SelectionDAG &DAG) const;
5408
5409 //===--------------------------------------------------------------------===//
5410 // Div utility functions
5411 //
5412
5413 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
5414 bool IsAfterLegalTypes,
5415 SmallVectorImpl<SDNode *> &Created) const;
5416 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
5417 bool IsAfterLegalTypes,
5418 SmallVectorImpl<SDNode *> &Created) const;
5419 // Build sdiv by power-of-2 with conditional move instructions
5420 SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor,
5421 SelectionDAG &DAG,
5422 SmallVectorImpl<SDNode *> &Created) const;
5423
5424 /// Targets may override this function to provide custom SDIV lowering for
5425 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
5426 /// assumes SDIV is expensive and replaces it with a series of other integer
5427 /// operations.
5428 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
5429 SelectionDAG &DAG,
5430 SmallVectorImpl<SDNode *> &Created) const;
5431
5432 /// Targets may override this function to provide custom SREM lowering for
5433 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
5434 /// assumes SREM is expensive and replaces it with a series of other integer
5435 /// operations.
5436 virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
5437 SelectionDAG &DAG,
5438 SmallVectorImpl<SDNode *> &Created) const;
5439
  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist. The default of 0 disables the combine.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }
5447
5448 /// Hooks for building estimates in place of slower divisions and square
5449 /// roots.
5450
5451 /// Return either a square root or its reciprocal estimate value for the input
5452 /// operand.
5453 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
5454 /// 'Enabled' as set by a potential default override attribute.
5455 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
5456 /// refinement iterations required to generate a sufficient (though not
5457 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
5458 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
5459 /// algorithm implementation that uses either one or two constants.
5460 /// The boolean Reciprocal is used to select whether the estimate is for the
5461 /// square root of the input operand or the reciprocal of its square root.
5462 /// A target may choose to implement its own refinement within this function.
5463 /// If that's true, then return '0' as the number of RefinementSteps to avoid
5464 /// any further refinement of the estimate.
5465 /// An empty SDValue return means no estimate sequence can be created.
5467 int Enabled, int &RefinementSteps,
5468 bool &UseOneConstNR, bool Reciprocal) const {
5469 return SDValue();
5470 }
5471
5472 /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
5473 /// required for correctness since InstCombine might have canonicalized a
5474 /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic. If we were to fall
5475 /// through to the default expansion/soften to libcall, we might introduce a
5476 /// link-time dependency on libm into a file that originally did not have one.
5477 SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;
5478
5479 /// Return a reciprocal estimate value for the input operand.
5480 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
5481 /// 'Enabled' as set by a potential default override attribute.
5482 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
5483 /// refinement iterations required to generate a sufficient (though not
5484 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
5485 /// A target may choose to implement its own refinement within this function.
5486 /// If that's true, then return '0' as the number of RefinementSteps to avoid
5487 /// any further refinement of the estimate.
5488 /// An empty SDValue return means no estimate sequence can be created.
5490 int Enabled, int &RefinementSteps) const {
5491 return SDValue();
5492 }
5493
5494 /// Return a target-dependent comparison result if the input operand is
5495 /// suitable for use with a square root estimate calculation. For example, the
5496 /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
5497 /// result should be used as the condition operand for a select or branch.
5498 virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
5499 const DenormalMode &Mode,
5500 SDNodeFlags Flags = {}) const;
5501
5502 /// Return a target-dependent result if the input operand is not suitable for
5503 /// use with a square root estimate calculation.
5505 SelectionDAG &DAG) const {
5506 return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
5507 }
5508
5509 //===--------------------------------------------------------------------===//
5510 // Legalization utility functions
5511 //
5512
5513 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
5514 /// respectively, each computing an n/2-bit part of the result.
5515 /// \param Result A vector that will be filled with the parts of the result
5516 /// in little-endian order.
5517 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
5518 /// if you want to control how low bits are extracted from the LHS.
5519 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
5520 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
5521 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
5522 /// \returns true if the node has been expanded, false if it has not
5523 bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
5524 SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
5525 SelectionDAG &DAG, MulExpansionKind Kind,
5526 SDValue LL = SDValue(), SDValue LH = SDValue(),
5527 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
5528
5529 /// Expand a MUL into two nodes. One that computes the high bits of
5530 /// the result and one that computes the low bits.
5531 /// \param HiLoVT The value type to use for the Lo and Hi nodes.
5532 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
5533 /// if you want to control how low bits are extracted from the LHS.
5534 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
5535 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
5536 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
5538 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
5539 SelectionDAG &DAG, MulExpansionKind Kind,
5540 SDValue LL = SDValue(), SDValue LH = SDValue(),
5541 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
5542
5543 /// Attempt to expand an n-bit div/rem/divrem by constant using an n/2-bit
5544 /// algorithm. First, attempt to expand the division using a n/2-bit urem by
5545 /// constant and other arithmetic ops. The n/2-bit urem by constant will be
5546 /// expanded by DAGCombiner. As this is not possible for all constant
5547 /// divisors, this method falls back to an implementation of the magic
5548 /// algorithm using n/2-bit operations.
5549 /// \param N Node to expand
5550 /// \param Result A vector that will be filled with the lo and high parts of
5551 /// the results. For *DIVREM, this will be the quotient parts followed
5552 /// by the remainder parts.
5553 /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
5554 /// half of VT.
5555 /// \param LL Low bits of the LHS of the operation. You can use this
5556 /// parameter if you want to control how low bits are extracted from
5557 /// the LHS.
5558 /// \param LH High bits of the LHS of the operation. See LL for meaning.
5559 /// \returns true if the node has been expanded, false if it has not.
5560 bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
5561 EVT HiLoVT, SelectionDAG &DAG,
5562 SDValue LL = SDValue(),
5563 SDValue LH = SDValue()) const;
5564
5565 /// Expand funnel shift.
5566 /// \param N Node to expand
5567 /// \returns The expansion if successful, SDValue() otherwise
5568 SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;
5569
5570 /// Expand carryless multiply.
5571 /// \param N Node to expand
5572 /// \returns The expansion if successful, SDValue() otherwise
5573 SDValue expandCLMUL(SDNode *N, SelectionDAG &DAG) const;
5574
5575 /// Expand rotations.
5576 /// \param N Node to expand
5577 /// \param AllowVectorOps expand vector rotate, this should only be performed
5578 /// if the legalization is happening outside of LegalizeVectorOps
5579 /// \returns The expansion if successful, SDValue() otherwise
5580 SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;
5581
5582 /// Expand shift-by-parts.
5583 /// \param N Node to expand
5584 /// \param Lo lower-output-part after conversion
5585 /// \param Hi upper-output-part after conversion
5586 void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
5587 SelectionDAG &DAG) const;
5588
5589 /// Expand float(f32) to SINT(i64) conversion
5590 /// \param N Node to expand
5591 /// \param Result output after conversion
5592 /// \returns True, if the expansion was successful, false otherwise
5593 bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
5594
5595 /// Expand float to UINT conversion
5596 /// \param N Node to expand
5597 /// \param Result output after conversion
5598 /// \param Chain output chain after conversion
5599 /// \returns True, if the expansion was successful, false otherwise
5600 bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
5601 SelectionDAG &DAG) const;
5602
5603 /// Expand UINT(i64) to double(f64) conversion
5604 /// \param N Node to expand
5605 /// \param Result output after conversion
5606 /// \param Chain output chain after conversion
5607 /// \returns True, if the expansion was successful, false otherwise
5608 bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
5609 SelectionDAG &DAG) const;
5610
5611 /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
5612 SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
5613
5614 /// Expand fminimum/fmaximum into multiple comparison with selects.
5615 SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const;
5616
5617 /// Expand fminimumnum/fmaximumnum into multiple comparison with selects.
5618 SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const;
5619
5620 /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
5621 /// \param N Node to expand
5622 /// \returns The expansion result
5623 SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
5624
5625 /// Truncate Op to ResultVT. If the result is exact, leave it alone. If it is
5626 /// not exact, force the result to be odd.
5627 /// \param ResultVT The type of result.
5628 /// \param Op The value to round.
5629 /// \returns The expansion result
5630 SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL,
5631 SelectionDAG &DAG) const;
5632
5633 /// Expand round(fp) to fp conversion
5634 /// \param N Node to expand
5635 /// \returns The expansion result
5636 SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const;
5637
5638 /// Expand check for floating point class.
5639 /// \param ResultVT The type of intrinsic call result.
5640 /// \param Op The tested value.
5641 /// \param Test The test to perform.
5642 /// \param Flags The optimization flags.
5643 /// \returns The expansion result or SDValue() if it fails.
5644 SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
5645 SDNodeFlags Flags, const SDLoc &DL,
5646 SelectionDAG &DAG) const;
5647
5648 /// Expand FCANONICALIZE to FMUL with 1.
  /// \param Node Node to expand
5650 /// \returns The expansion result
5651 SDValue expandFCANONICALIZE(SDNode *Node, SelectionDAG &DAG) const;
5652
5653 /// Expand CONVERT_FROM_ARBITRARY_FP using bit manipulation.
5654 /// \param Node Node to expand.
5655 /// \returns The expansion result, or SDValue() if fails.
5656 SDValue expandCONVERT_FROM_ARBITRARY_FP(SDNode *Node,
5657 SelectionDAG &DAG) const;
5658
5659 /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
5660 /// vector nodes can only succeed if all operations are legal/custom.
5661 /// \param N Node to expand
5662 /// \returns The expansion result or SDValue() if it fails.
5663 SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;
5664
5665 /// Expand VP_CTPOP nodes.
5666 /// \returns The expansion result or SDValue() if it fails.
5667 SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;
5668
5669 /// Expand CTLZ/CTLZ_ZERO_POISON nodes. Expands vector/scalar CTLZ nodes,
5670 /// vector nodes can only succeed if all operations are legal/custom.
5671 /// \param N Node to expand
5672 /// \returns The expansion result or SDValue() if it fails.
5673 SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;
5674
5675 /// Expand VP_CTLZ/VP_CTLZ_ZERO_POISON nodes.
5676 /// \param N Node to expand
5677 /// \returns The expansion result or SDValue() if it fails.
5678 SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;
5679
5680 /// Expand CTLS (count leading sign bits) nodes.
5681 /// CTLS(x) = CTLZ(OR(SHL(XOR(x, SRA(x, BW-1)), 1), 1))
5682 /// \param N Node to expand
5683 /// \returns The expansion result or SDValue() if it fails.
5684 SDValue expandCTLS(SDNode *N, SelectionDAG &DAG) const;
5685
5686 /// Expand CTTZ via Table Lookup.
5687 /// \param N Node to expand
5688 /// \returns The expansion result or SDValue() if it fails.
5689 SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
5690 SDValue Op, unsigned NumBitsPerElt) const;
5691
5692 /// Expand CTTZ/CTTZ_ZERO_POISON nodes. Expands vector/scalar CTTZ nodes,
5693 /// vector nodes can only succeed if all operations are legal/custom.
5694 /// \param N Node to expand
5695 /// \returns The expansion result or SDValue() if it fails.
5696 SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;
5697
5698 /// Expand VP_CTTZ/VP_CTTZ_ZERO_POISON nodes.
5699 /// \param N Node to expand
5700 /// \returns The expansion result or SDValue() if it fails.
5701 SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;
5702
5703 /// Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_POISON nodes.
5704 /// \param N Node to expand
5705 /// \returns The expansion result or SDValue() if it fails.
5706 SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const;
5707
5708 /// Expand VECTOR_FIND_LAST_ACTIVE nodes
5709 /// \param N Node to expand
5710 /// \returns The expansion result or SDValue() if it fails.
5711 SDValue expandVectorFindLastActive(SDNode *N, SelectionDAG &DAG) const;
5712
5713 /// Expand LOOP_DEPENDENCE_MASK nodes
5714 /// \param N Node to expand
5715 /// \returns The expansion result or SDValue() if it fails.
5716 SDValue expandLoopDependenceMask(SDNode *N, SelectionDAG &DAG) const;
5717
5718 /// Expand ABS nodes. Expands vector/scalar ABS nodes,
5719 /// vector nodes can only succeed if all operations are legal/custom.
5720 /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
5721 /// \param N Node to expand
5722 /// \param IsNegative indicate negated abs
5723 /// \returns The expansion result or SDValue() if it fails.
5724 SDValue expandABS(SDNode *N, SelectionDAG &DAG,
5725 bool IsNegative = false) const;
5726
5727 /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
5728 /// \param N Node to expand
5729 /// \returns The expansion result or SDValue() if it fails.
5730 SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;
5731
5732 /// Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
5733 /// \param N Node to expand
5734 /// \returns The expansion result or SDValue() if it fails.
5735 SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const;
5736
5737 /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
5738 /// scalar types. Returns SDValue() if expand fails.
5739 /// \param N Node to expand
5740 /// \returns The expansion result or SDValue() if it fails.
5741 SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;
5742
5743 /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with
5744 /// i16/i32/i64 scalar types. Returns SDValue() if expand fails. \param N Node
5745 /// to expand \returns The expansion result or SDValue() if it fails.
5746 SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;
5747
5748 /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
5749 /// Returns SDValue() if expand fails.
5750 /// \param N Node to expand
5751 /// \returns The expansion result or SDValue() if it fails.
5752 SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
5753
5754 /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
5755 /// i8/i16/i32/i64 scalar types. \param N Node to expand \returns The
5756 /// expansion result or SDValue() if it fails.
5757 SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
5758
5759 /// Turn load of vector type into a load of the individual elements.
5760 /// \param LD load to expand
5761 /// \returns BUILD_VECTOR and TokenFactor nodes.
5762 std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
5763 SelectionDAG &DAG) const;
5764
5765 // Turn a store of a vector type into stores of the individual elements.
5766 /// \param ST Store with a vector value type
5767 /// \returns TokenFactor of the individual store chains.
5769
5770 /// Expands an unaligned load to 2 half-size loads for an integer, and
5771 /// possibly more for vectors.
5772 std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
5773 SelectionDAG &DAG) const;
5774
5775 /// Expands an unaligned store to 2 half-size stores for integer values, and
5776 /// possibly more for vectors.
5777 SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
5778
5779 /// Increments memory address \p Addr according to the type of the value
5780 /// \p DataVT that should be stored. If the data is stored in compressed
5781 /// form, the memory address should be incremented according to the number of
5782 /// the stored elements. This number is equal to the number of '1's bits
5783 /// in the \p Mask.
5784 /// \p DataVT is a vector type. \p Mask is a vector value.
5785 /// \p DataVT and \p Mask have the same number of vector elements.
5786 SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
5787 EVT DataVT, SelectionDAG &DAG,
5788 bool IsCompressedMemory) const;
5789
5790 /// Get a pointer to vector element \p Idx located in memory for a vector of
5791 /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
5792 /// bounds the returned pointer is unspecified, but will be within the vector
5793 /// bounds. \p PtrArithFlags can be used to mark that arithmetic within the
5794 /// vector in memory is known to not wrap or to be inbounds.
5795 SDValue getVectorElementPointer(
5796 SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index,
5797 const SDNodeFlags PtrArithFlags = SDNodeFlags()) const;
5798
5799 /// Get a pointer to vector element \p Idx located in memory for a vector of
5800 /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
5801 /// bounds the returned pointer is unspecified, but will be within the vector
5802 /// bounds. \p VecPtr is guaranteed to point to the beginning of a memory
5803 /// location large enough for the vector.
5805 EVT VecVT, SDValue Index) const {
5806 return getVectorElementPointer(DAG, VecPtr, VecVT, Index,
5809 }
5810
5811 /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
5812 /// in memory for a vector of type \p VecVT starting at a base address of
5813 /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
5814 /// returned pointer is unspecified, but the value returned will be such that
5815 /// the entire subvector would be within the vector bounds. \p PtrArithFlags
5816 /// can be used to mark that arithmetic within the vector in memory is known
5817 /// to not wrap or to be inbounds.
5818 SDValue
5819 getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
5820 EVT SubVecVT, SDValue Index,
5821 const SDNodeFlags PtrArithFlags = SDNodeFlags()) const;
5822
5823 /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
5824 /// method accepts integers as its arguments.
5825 SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
5826
5827 /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
5828 /// method accepts integers as its arguments.
5829 SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
5830
5831 /// Method for building the DAG expansion of ISD::[US]CMP. This
5832 /// method accepts integers as its arguments
5833 SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const;
5834
5835 /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
5836 /// method accepts integers as its arguments.
5837 SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;
5838
5839 /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
5840 /// method accepts integers as its arguments.
5841 SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
5842
5843 /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
5844 /// method accepts integers as its arguments.
5845 /// Note: This method may fail if the division could not be performed
5846 /// within the type. Clients must retry with a wider type if this happens.
5847 SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
5849 unsigned Scale, SelectionDAG &DAG) const;
5850
5851 /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
 5852 /// always succeeds and populates the Result and Overflow arguments.
5853 void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5854 SelectionDAG &DAG) const;
5855
5856 /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
 5857 /// always succeeds and populates the Result and Overflow arguments.
5858 void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5859 SelectionDAG &DAG) const;
5860
5861 /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
5862 /// expansion was successful and populates the Result and Overflow arguments.
5863 bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5864 SelectionDAG &DAG) const;
5865
5866 /// Calculate the product twice the width of LHS and RHS. If HiLHS/HiRHS are
5867 /// non-null they will be included in the multiplication. The expansion works
5868 /// by splitting the 2 inputs into 4 pieces that we can multiply and add
 5869 /// together without needing MULH or MUL_LOHI.
5870 void forceExpandMultiply(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
5872 SDValue HiLHS = SDValue(),
5873 SDValue HiRHS = SDValue()) const;
5874
5875 /// Calculate full product of LHS and RHS either via a libcall or through
5876 /// brute force expansion of the multiplication. The expansion works by
5877 /// splitting the 2 inputs into 4 pieces that we can multiply and add together
5878 /// without needing MULH or MUL_LOHI.
5879 void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
5880 const SDValue LHS, const SDValue RHS, SDValue &Lo,
5881 SDValue &Hi) const;
5882
5883 /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
5884 /// only the first Count elements of the vector are used.
5885 SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;
5886
5887 /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
5888 SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
5889
5890 /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
5891 /// Returns true if the expansion was successful.
5892 bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;
5893
5894 /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
5895 /// method accepts vectors as its arguments.
5896 SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;
5897
5898 /// Expand a vector VECTOR_COMPRESS into a sequence of extract element, store
5899 /// temporarily, advance store position, before re-loading the final vector.
5900 SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const;
5901
5902 /// Expand a CTTZ_ELTS or CTTZ_ELTS_ZERO_POISON by calculating (VL - i) for
5903 /// each active lane (i), getting the maximum and subtracting it from VL.
5904 SDValue expandCttzElts(SDNode *Node, SelectionDAG &DAG) const;
5905
5906 /// Expands PARTIAL_REDUCE_S/UMLA nodes to a series of simpler operations,
5907 /// consisting of zext/sext, extract_subvector, mul and add operations.
5908 SDValue expandPartialReduceMLA(SDNode *Node, SelectionDAG &DAG) const;
5909
5910 /// Expands a node with multiple results to an FP or vector libcall. The
5911 /// libcall is expected to take all the operands of the \p Node followed by
5912 /// output pointers for each of the results. \p CallRetResNo can be optionally
5913 /// set to indicate that one of the results comes from the libcall's return
5914 /// value.
5915 bool expandMultipleResultFPLibCall(
5916 SelectionDAG &DAG, RTLIB::Libcall LC, SDNode *Node,
5918 std::optional<unsigned> CallRetResNo = {}) const;
5919
5920 /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC
5921 /// on the current target. A VP_SETCC will additionally be given a Mask
5922 /// and/or EVL not equal to SDValue().
5923 ///
5924 /// If the SETCC has been legalized using AND / OR, then the legalized node
5925 /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
5926 /// will be set to false. This will also hold if the VP_SETCC has been
5927 /// legalized using VP_AND / VP_OR.
5928 ///
5929 /// If the SETCC / VP_SETCC has been legalized by using
5930 /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
5931 /// swapped, CC will be set to the new condition, and NeedInvert will be set
5932 /// to false.
5933 ///
5934 /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
5935 /// then LHS and RHS will be unchanged, CC will set to the inverted condcode,
5936 /// and NeedInvert will be set to true. The caller must invert the result of
5937 /// the SETCC with SelectionDAG::getLogicalNOT() or take equivalent action to
5938 /// swap the effect of a true/false result.
5939 ///
5940 /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
5941 /// hasn't.
5942 bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
5943 SDValue &RHS, SDValue &CC, SDValue Mask,
5944 SDValue EVL, bool &NeedInvert, const SDLoc &dl,
5945 SDValue &Chain, bool IsSignaling = false) const;
5946
5947 //===--------------------------------------------------------------------===//
5948 // Instruction Emitting Hooks
5949 //
5950
5951 /// This method should be implemented by targets that mark instructions with
5952 /// the 'usesCustomInserter' flag. These instructions are special in various
5953 /// ways, which require special support to insert. The specified MachineInstr
5954 /// is created but not inserted into any basic blocks, and this method is
5955 /// called to expand it into a sequence of instructions, potentially also
5956 /// creating new basic blocks and control flow.
5957 /// As long as the returned basic block is different (i.e., we created a new
5958 /// one), the custom inserter is free to modify the rest of \p MBB.
5959 virtual MachineBasicBlock *
5960 EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
5961
5962 /// This method should be implemented by targets that mark instructions with
5963 /// the 'hasPostISelHook' flag. These instructions must be adjusted after
5964 /// instruction selection by target hooks. e.g. To fill in optional defs for
5965 /// ARM 's' setting instructions.
5966 virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
5967 SDNode *Node) const;
5968
5969 /// If this function returns true, SelectionDAGBuilder emits a
5970 /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
5971 virtual bool useLoadStackGuardNode(const Module &M) const { return false; }
5972
5974 const SDLoc &DL) const {
5975 llvm_unreachable("not implemented for this target");
5976 }
5977
5978 /// Lower TLS global address SDNode for target independent emulated TLS model.
5979 virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
5980 SelectionDAG &DAG) const;
5981
5982 /// Expands target specific indirect branch for the case of JumpTable
5983 /// expansion.
5984 virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
5985 SDValue Addr, int JTI,
5986 SelectionDAG &DAG) const;
5987
5988 // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
5989 // If we're comparing for equality to zero and isCtlzFast is true, expose the
5990 // fact that this can be implemented as a ctlz/srl pair, so that the dag
5991 // combiner can fold the new nodes.
5992 SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
5993
5994 // Return true if `X & Y eq/ne 0` is preferable to `X & Y ne/eq Y`
5996 return true;
5997 }
5998
5999 // Expand vector operation by dividing it into smaller length operations and
6000 // joining their results. SDValue() is returned when expansion did not happen.
6001 SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const;
6002
6003 /// Replace an extraction of a load with a narrowed load.
6004 ///
6005 /// \param ResultVT type of the result extraction.
 6006 /// \param InVecVT type of the input vector with bitcasts resolved.
6007 /// \param EltNo index of the vector element to load.
 6008 /// \param OriginalLoad vector load that is to be replaced.
6009 /// \returns \p ResultVT Load on success SDValue() on failure.
6010 SDValue scalarizeExtractedVectorLoad(EVT ResultVT, const SDLoc &DL,
6011 EVT InVecVT, SDValue EltNo,
6012 LoadSDNode *OriginalLoad,
6013 SelectionDAG &DAG) const;
6014
6015protected:
6016 void setTypeIdForCallsiteInfo(const CallBase *CB, MachineFunction &MF,
6017 MachineFunction::CallSiteInfo &CSInfo) const;
6018
6019private:
6020 SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
6021 const SDLoc &DL, DAGCombinerInfo &DCI) const;
6022 SDValue foldSetCCWithOr(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
6023 const SDLoc &DL, DAGCombinerInfo &DCI) const;
6024 SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
6025 const SDLoc &DL, DAGCombinerInfo &DCI) const;
6026
6027 SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
6029 DAGCombinerInfo &DCI,
6030 const SDLoc &DL) const;
6031
6032 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
6033 SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
6034 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
6035 DAGCombinerInfo &DCI, const SDLoc &DL) const;
6036
6037 SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
6038 SDValue CompTargetNode, ISD::CondCode Cond,
6039 DAGCombinerInfo &DCI, const SDLoc &DL,
6040 SmallVectorImpl<SDNode *> &Created) const;
6041 SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
6042 ISD::CondCode Cond, DAGCombinerInfo &DCI,
6043 const SDLoc &DL) const;
6044
6045 SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
6046 SDValue CompTargetNode, ISD::CondCode Cond,
6047 DAGCombinerInfo &DCI, const SDLoc &DL,
6048 SmallVectorImpl<SDNode *> &Created) const;
6049 SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
6050 ISD::CondCode Cond, DAGCombinerInfo &DCI,
6051 const SDLoc &DL) const;
6052
6053 bool expandUDIVREMByConstantViaUREMDecomposition(
6054 SDNode *N, APInt Divisor, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
6055 SelectionDAG &DAG, SDValue LL, SDValue LH) const;
6056
6057 bool expandUDIVREMByConstantViaUMulHiMagic(SDNode *N, const APInt &Divisor,
6059 EVT HiLoVT, SelectionDAG &DAG,
6060 SDValue LL, SDValue LH) const;
6061};
6062
6063/// Given an LLVM IR type and return type attributes, compute the return value
6064/// EVTs and flags, and optionally also the offsets, if the return value is
6065/// being lowered to memory.
6066LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
6067 AttributeList attr,
6068 SmallVectorImpl<ISD::OutputArg> &Outs,
6069 const TargetLowering &TLI, const DataLayout &DL);
6070
6071} // end namespace llvm
6072
6073#endif // LLVM_CODEGEN_TARGETLOWERING_H
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
block Block Frequency Analysis
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_READONLY
Definition Compiler.h:322
This file defines the DenseMap class.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t High
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT, SelectionDAG &DAG)
Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
bool isFloatingPointOperation() const
BinOp getOperation() const
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
LLVM_ABI bool getValueAsBool() const
Return the attribute's value as a boolean.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
This class represents a range of values.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
unsigned size() const
Definition DenseMap.h:110
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
bool isVarArg() const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:763
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
A wrapper class for inspecting calls to intrinsic functions.
static LLT integer(unsigned SizeInBits)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Context object for machine code objects.
Definition MCContext.h:83
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
Machine Value Type.
@ INVALID_SIMPLE_VALUE_TYPE
SimpleValueType SimpleTy
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
ElementCount getVectorElementCount() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
This is an abstract virtual class for memory operations.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Represent a mutable reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:294
A discriminated union of two or more pointer types, with the discriminator in the low bits of the poi...
Analysis providing profile information.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool use_empty() const
Return true if there are no uses of this node.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
const DataLayout & getDataLayout() const
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVMContext * getContext() const
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
Multiway switch.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
ArgListEntry(Value *Val, SDValue Node=SDValue())
ArgListEntry(Value *Val, SDValue Node, Type *Ty)
Type * Ty
Same as OrigTy, or partially legalized for soft float libcalls.
Type * OrigTy
Original unlegalized argument type.
LegalizeTypeAction getTypeAction(MVT VT) const
void setTypeAction(MVT VT, LegalizeTypeAction Action)
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
virtual bool enableAggressiveFMAFusion(LLT Ty) const
Return true if target always benefits from combining into FMA for a given value type.
virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform a bit test atomicrmw using a target-specific intrinsic.
void setOperationAction(ArrayRef< unsigned > Ops, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual bool requiresUniformRegister(MachineFunction &MF, const Value *) const
Allows target to decide about the register class of the specific value that is live outside the defin...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
virtual unsigned getVaListSizeInBits(const DataLayout &DL) const
Returns the size of the platform's va_list object.
virtual bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask, IntrinsicInst *DI, const APInt &GapMask) const
Lower a deinterleave intrinsic to a target specific load intrinsic.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const
virtual bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
virtual bool hasAndNot(SDValue X) const
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
ReciprocalEstimate
Reciprocal estimate status values used by the functions below.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool enableAggressiveFMAFusion(EVT VT) const
Return true if target always benefits from combining into FMA for a given value type.
virtual bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const
Does this target support complex deinterleaving with the given operation and type.
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool, EVT ValVT) const
Promote the given target boolean to a target boolean of the given type.
virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const
Returns true if be combined with to form an ISD::FMAD.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
virtual bool hasStandaloneRem(EVT VT) const
Return true if the target can handle a standalone remainder operation.
virtual bool isExtFreeImpl(const Instruction *I) const
Return true if the extension represented by I is free.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
virtual bool isSelectSupported(SelectSupportKind) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual bool isEqualityCmpFoldedWithSignedCmp() const
Return true if instruction generated for equality comparison is folded with instruction generated for...
virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const
Should we prefer selects to doing arithmetic on boolean types.
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const
Use bitwise logic to make pairs of compares more efficient.
void setAtomicLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const
Return if the target supports combining a chain like:
virtual Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const
Create the IR node for the given complex deinterleaving operation.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT, unsigned Scale) const
Custom method defined by each target to indicate if an operation which may require a scale is support...
void setLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
unsigned getMaximumLegalStoreInBits() const
Return maximum known-legal store size, which can be guaranteed for scalable vectors.
virtual bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context, EVT VT) const
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
virtual Sched::Preference getSchedulingPreference(SDNode *) const
Some scheduler, e.g.
virtual MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
bool isExtLoad(const LoadInst *Load, const Instruction *Ext, const DataLayout &DL) const
Return true if Load and Ext can form an ExtLoad.
LegalizeTypeAction getTypeAction(MVT VT) const
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual bool shouldInsertFencesForAtomic(const Instruction *I) const
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
virtual AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
bool isOperationExpandOrLibCall(unsigned Op, EVT VT) const
virtual bool allowsMisalignedMemoryAccesses(LLT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
LLT handling variant.
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const
Perform a cmpxchg expansion using a target-specific method.
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
virtual ISD::NodeType getExtendForAtomicRMWArg(unsigned Op) const
Returns how the platform's atomic rmw operations expect their input argument to be extended (ZERO_EXT...
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
bool rangeFitsInWord(const APInt &Low, const APInt &High, const DataLayout &DL) const
Check whether the range [Low,High] fits in a machine word.
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to ...
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
virtual Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const
Perform a masked atomicrmw using a target-specific intrinsic.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
virtual LegalizeAction getCustomTruncStoreAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace) const
Returns an alternative action to use when the coarser lookups (configured through setTruncStoreAction...
bool enableExtLdPromotion() const
Return true if the target wants to use the optimization that turns ext(promotableInst1(....
virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, LLT DestTy, LLT SrcTy) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and ind...
void setMaxBytesForAlignment(unsigned MaxBytes)
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual bool hasBitTest(SDValue X, SDValue Y) const
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
MVT getRegisterType(LLVMContext &Context, EVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
virtual bool needsFixedCatchObjects() const
EVT getLegalTypeToTransformTo(LLVMContext &Context, EVT VT) const
Perform getTypeToTransformTo repeatedly until a legal type is obtained.
virtual Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum fp to/from int conversion the backend supports.
const LibcallLoweringInfo & getLibcallLoweringInfo() const
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
unsigned getMinimumBitTestCmps() const
Return the minimum of the largest number of comparisons in BitTest.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual bool useFPRegsForHalfType() const
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
bool hasExtractBitsInsn() const
Return true if the target has BitExtract instructions.
virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall implementation.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
virtual bool areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX, const MemSDNode &NodeY) const
Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
virtual unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const
virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
void setOperationAction(ArrayRef< unsigned > Ops, MVT VT, LegalizeAction Action)
virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst, SDValue IntPow2) const
SelectSupportKind
Enum that describes what type of support for selects the target has.
RTLIB::LibcallImpl getMemcpyImpl() const
LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT, EVT SrcVT) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X,...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
virtual bool preferScalarizeSplat(SDNode *N) const
bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &) const
LLT returning variant.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const
Perform an atomicrmw expansion using a target-specific way.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const
Return true if it is profitable to convert a select of FP constants into a constant pool load whose a...
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
virtual bool hasStackProbeSymbol(const MachineFunction &MF) const
Returns the name of the symbol used to emit stack probes or the empty string if not applicable.
bool isSlowDivBypassed() const
Returns true if target has indicated at least one type should be bypassed.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isPartialReduceMLALegalOrCustom(unsigned Opc, EVT AccVT, EVT InputVT) const
Return true if a PARTIAL_REDUCE_U/SMLA node with the specified types is legal or custom for this targ...
virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual bool hasMultipleConditionRegisters(EVT VT) const
Does the target have multiple (allocatable) condition registers that can be used to store the results...
unsigned getMaxExpandSizeMemcmp(bool OptSize) const
Get maximum # of load operations permitted for memcmp.
bool isStrictFPEnabled() const
Return true if the target support strict float operation.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const
True if target has some particular form of dealing with pointer arithmetic semantics for pointers wit...
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const
Lower an interleaved store to target specific intrinsics.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
virtual bool shouldFoldSelectWithSingleBitTest(EVT VT, const APInt &AndMask) const
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy,Idx).
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N) const
Return true if it is profitable to fold a pair of shifts into a mask.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
void setLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
virtual void emitExpandAtomicStore(StoreInst *SI) const
Perform an atomic store using a target-specific way.
virtual bool preferIncOfAddToSubOfNot(EVT VT) const
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
virtual bool ShouldShrinkFPConstant(EVT) const
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned getMaxDivRemBitWidthSupported() const
Returns the size in bits of the maximum div/rem the backend supports.
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldKeepZExtForFP16Conv() const
Does this target require the clearing of high-order bits in a register passed to the fp16 to fp conve...
virtual AtomicExpansionKind shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const
Returns how the given atomic atomicrmw should be cast by the IR-level AtomicExpand pass.
void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and in...
virtual bool canTransformPtrArithOutOfBounds(const Function &F, EVT PtrVT) const
True if the target allows transformations of in-bounds pointer arithmetic that cause out-of-bounds in...
virtual bool shouldConsiderGEPOffsetSplit() const
const ValueTypeActionImpl & getValueTypeActions() const
virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, bool LegalOnly) const
TargetLoweringBase(const TargetMachine &TM, const TargetSubtargetInfo &STI)
NOTE: The TargetMachine owns TLOF.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool isTruncateFree(SDValue Val, EVT VT2) const
Return true if truncating the specific node Val to type VT2 is free.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
virtual bool shouldInsertTrailingSeqCstFenceForAtomicStore(const Instruction *I) const
Whether AtomicExpandPass should automatically insert a seq_cst trailing fence without reducing the or...
virtual bool isFNegFree(EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void setPartialReduceMLAAction(unsigned Opc, MVT AccVT, MVT InputVT, LegalizeAction Action)
Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treate...
virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isExtFree(const Instruction *I) const
Return true if the extension represented by I is free.
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const
Perform a masked cmpxchg using a target-specific intrinsic.
virtual bool isZExtFree(EVT FromTy, EVT ToTy) const
virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const
Return true if pulling a binary operation into a select with an identity constant is profitable.
BooleanContent
Enum that describes how the target represents true/false values.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
virtual ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const
virtual uint8_t getRepRegClassCostFor(MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeAction getPartialReduceMLAAction(unsigned Opc, EVT AccVT, EVT InputVT) const
Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treated.
bool isPredictableSelectExpensive() const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right...
virtual bool mergeStoresAfterLegalization(EVT MemVT) const
Allow store merging for the specified type after legalization in addition to before legalization.
virtual bool shouldIssueAtomicLoadForAtomicEmulationLoop(void) const
virtual bool shouldMergeStoreOfLoadsOverCall(EVT, EVT) const
Returns true if it's profitable to allow merging store of loads when there are functions calls betwee...
RTLIB::LibcallImpl getSupportedLibcallImpl(StringRef FuncName) const
Check if this is valid libcall for the current module, otherwise RTLIB::Unsupported.
virtual bool isProfitableToHoist(Instruction *I) const
unsigned getGatherAllAliasesMaxDepth() const
virtual LegalizeAction getCustomOperationAction(SDNode &Op) const
How to legalize this custom operation?
virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const
IR version.
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
LegalizeAction getLoadAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
TargetLoweringBase(const TargetLoweringBase &)=delete
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const
There are two ways to clear extreme bits (either low or high): Mask: x & (-1 << y) (the instcombine c...
virtual bool alignLoopsWithOptSize() const
Should loops be aligned even when the function is marked OptSize (but not MinSize).
unsigned getMaxAtomicSizeInBitsSupported() const
Returns the maximum atomic operation size (in bits) supported by the backend.
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, const MachineFunction &MF) const
Returns if it's reasonable to merge stores to MemVT size.
void setPartialReduceMLAAction(ArrayRef< unsigned > Opcodes, MVT AccVT, MVT InputVT, LegalizeAction Action)
LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
virtual bool preferABDSToABSWithNSW(EVT VT) const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
virtual bool getAddrModeArguments(const IntrinsicInst *, SmallVectorImpl< Value * > &, Type *&) const
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the add...
virtual bool hasInlineStackProbe(const MachineFunction &MF) const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
const DenseMap< unsigned int, unsigned int > & getBypassSlowDivWidths() const
Returns map of slow types for division or remainder with corresponding fast types.
void setOperationPromotedToType(ArrayRef< unsigned > Ops, MVT OrigVT, MVT DestVT)
unsigned getMaxLargeFPConvertBitWidthSupported() const
Returns the size in bits of the maximum fp to/from int conversion the backend supports.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, LLT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const
bool isTruncStoreLegal(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace) const
Return true if the specified store with truncation is legal on this target.
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual bool lowerInterleaveIntrinsicToStore(Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const
Lower an interleave intrinsic to a target specific store intrinsic.
virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
AndOrSETCCFoldKind
Enum of different potentially desirable ways to fold (and/or (setcc ...), (setcc ....
virtual bool shouldScalarizeBinop(SDValue VecOp) const
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
Align getPrefFunctionAlignment() const
Return the preferred function alignment.
RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Get the libcall impl routine name for the specified libcall.
virtual void emitExpandAtomicLoad(LoadInst *LI) const
Perform an atomic load in a target-specific way.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
static StringRef getLibcallImplName(RTLIB::LibcallImpl Call)
Get the libcall routine name for the specified libcall implementation.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool useSoftFloat() const
virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x,...
BooleanContent getBooleanContents(EVT Type) const
virtual LegalizeAction getCustomLoadAction(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Returns an alternative action to use when the coarser lookups (configured through setLoadExtAction an...
bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed masked store is legal on this target.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the preferred common base offset.
virtual bool isVectorClearMaskLegal(ArrayRef< int >, EVT) const
Similar to isShuffleMaskLegal.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool hasTargetDAGCombine(ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified node...
void setLibcallImpl(RTLIB::Libcall Call, RTLIB::LibcallImpl Impl)
virtual bool fallBackToDAGISel(const Instruction &Inst) const
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
virtual bool shouldSplatInsEltVarIndex(EVT) const
Return true if inserting a scalar into a variable element of an undef vector is more efficiently hand...
LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed masked load should be treated: either it is legal, needs to be promoted to a larger s...
NegatibleCost
Enum that specifies when a float negation is beneficial.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(EVT VT, unsigned ShiftOpc, bool MayTransformRotate, const APInt &ShiftOrRotateAmt, const std::optional< APInt > &AndMask) const
virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform an atomicrmw whose result is only used by comparison, using a target-specific intrinsic.
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const
Returns true if MI can be combined with another instruction to form TargetOpcode::G_FMAD.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, ArrayRef< MVT > VTs, LegalizeAction Action)
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual bool isLegalAddScalableImmediate(int64_t) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
std::vector< ArgListEntry > ArgListTy
virtual bool shouldAlignPointerArgs(CallInst *, unsigned &, Align &) const
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace) const
Return true if the specified store with truncation has solution on this target.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const
virtual MachineMemOperand::Flags getTargetMMOFlags(const MemSDNode &Node) const
This callback is used to inspect load/store SDNode.
virtual EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
virtual Type * shouldConvertSplatType(ShuffleVectorInst *SVI) const
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI...
virtual bool isZExtFree(SDValue Val, EVT VT2) const
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
void setAtomicLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const
virtual LLVM_READONLY LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy.
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
LLT getVectorIdxLLT(const DataLayout &DL) const
Returns the type to be used for the index operand of: G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT,...
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
virtual bool isComplexDeinterleavingSupported() const
Does this target support complex deinterleaving.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
bool isLoadLegal(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return true if the specified load with extension is legal on this target.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual bool addressingModeSupportsTLS(const GlobalValue &) const
Returns true if the target's addressing mode can target thread local storage (TLS).
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
bool isLoadLegalOrCustom(EVT ValVT, EVT MemVT, Align Alignment, unsigned AddrSpace, unsigned ExtType, bool Atomic) const
Return true if the specified load with extension is legal or custom on this target.
virtual bool shouldConvertPhiType(Type *From, Type *To) const
Given a set of interconnected phis of type 'From' that are loaded/stored or bitcast to type 'To',...
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
virtual bool preferZeroCompareBranch() const
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const
Lower an interleaved load to target specific intrinsics.
virtual unsigned getVectorIdxWidth(const DataLayout &DL) const
Returns the type to be used for the index operand vector operations.
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
virtual bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const
virtual LoadInst * lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can b...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
virtual bool hasPairedLoad(EVT, Align &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
virtual bool convertSelectOfConstantsToMath(EVT VT) const
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
TargetLoweringBase & operator=(const TargetLoweringBase &)=delete
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
const RTLIB::RuntimeLibcallsInfo & getRuntimeLibcallsInfo() const
virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SmallVector< ConstraintPair > ConstraintGroup
virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const
Hooks for building estimates in place of slower divisions and square roots.
virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI, bool IsAfterLegal) const
GlobalISel - return true if it is profitable to move this shift by a constant amount through its oper...
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual void ReplaceNodeResults(SDNode *, SmallVectorImpl< SDValue > &, SelectionDAG &) const
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression if the cost is not expensive.
virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
SDValue getCheaperOrNeutralNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, const NegatibleCost CostThreshold=NegatibleCost::Neutral, unsigned Depth=0) const
virtual Register getRegisterByName(const char *RegName, LLT Ty, const MachineFunction &MF) const
Return the register ID of the name passed in.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
virtual bool isTargetCanonicalSelect(SDNode *N) const
Return true if the given select/vselect should be considered canonical and not be transformed.
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual SDValue lowerEHPadEntry(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
Optional target hook to add target-specific actions when entering EH pad blocks.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue unwrapAddress(SDValue N) const
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual bool IsDesirableToPromoteOp(SDValue, EVT &) const
This method query the target whether it is beneficial for dag combiner to promote the specified node.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const
Insert explicit copies in entry and exit blocks.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
~TargetLowering() override
TargetLowering & operator=(const TargetLowering &)=delete
virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset, bool IsPre, MachineRegisterInfo &MRI) const
Returns true if the specified base+offset is a legal indexed addressing mode for this target.
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
virtual void initializeSplitCSR(MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const
Return a reciprocal estimate value for the input operand.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
virtual bool getPostIndexedAddressParts(SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
virtual bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const
For most targets, an LLVM type must be broken down into multiple smaller types.
virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to move this shift by a constant amount through its operand,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *, const MachineBasicBlock *, unsigned, MCContext &) const
virtual bool useTopologicalSorting() const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
It is an error to pass RTLIB::UNKNOWN_LIBCALL as LC.
virtual FastISel * createFastISel(FunctionLoweringInfo &, const TargetLibraryInfo *, const LibcallLoweringInfo *) const
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const
virtual void HandleByVal(CCState *, unsigned &, Align) const
Target-specific cleanup for formal ByVal parameters.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual bool getPreIndexedAddressParts(SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index, const SDNodeFlags PtrArithFlags=SDNodeFlags()) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, Register N1) const
virtual bool mayBeEmittedAsTailCall(const CallInst *) const
Return true if the target may be able to emit the call instruction as a tail call.
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
SDValue getInboundsVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const
virtual bool isDesirableToTransformToIntegerOp(unsigned, EVT) const
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
CallInst * Call
#define UINT64_MAX
Definition DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition ISDOpcodes.h:394
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition ISDOpcodes.h:522
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:400
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:857
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ CLMUL
Carry-less multiplication operations.
Definition ISDOpcodes.h:778
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition ISDOpcodes.h:407
@ PARTIAL_REDUCE_UMLA
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:848
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition ISDOpcodes.h:715
@ PARTIAL_REDUCE_FMLA
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:769
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:854
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition ISDOpcodes.h:413
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:478
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:477
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:930
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition ISDOpcodes.h:710
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
Definition ISDOpcodes.h:681
@ PARTIAL_REDUCE_SUMLA
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition ISDOpcodes.h:722
static const int LAST_LOADEXT_TYPE
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
This namespace contains all of the command line option processing machinery.
Definition CommandLine.h:52
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:557
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition Alignment.h:134
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:156
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:337
LLVM_ABI bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition Utils.cpp:1616
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os, -Oz
Definition CodeGen.h:85
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
AtomicOrdering
Atomic ordering for LLVM's memory model.
LLVM_ABI EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
TargetTransformInfo TTI
CombineLevel
Definition DAGCombine.h:15
@ AfterLegalizeDAG
Definition DAGCombine.h:19
@ AfterLegalizeVectorOps
Definition DAGCombine.h:18
@ BeforeLegalizeTypes
Definition DAGCombine.h:16
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition Analysis.cpp:539
DWARFExpression::Operation Op
LLVM_ABI bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition Utils.cpp:1629
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
UndefPoisonKind
Enumeration to track whether we are interested in Undef, Poison, or both.
Definition UndefPoison.h:20
static cl::opt< unsigned > CostThreshold("dfa-cost-threshold", cl::desc("Maximum cost accepted for the transformation"), cl::Hidden, cl::init(50))
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:874
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:308
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
bool isByteSized() const
Return true if the bit size is a multiple of 8.
Definition ValueTypes.h:251
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
Definition ValueTypes.h:150
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:165
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
ConstraintInfo()=default
Default constructor.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
bool isDstAligned(Align AlignCheck) const
bool allowOverlap() const
bool isFixedDstAlign() const
uint64_t size() const
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
Align getDstAlign() const
bool isMemcpyStrSrc() const
bool isAligned(Align AlignCheck) const
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
bool isSrcAligned(Align AlignCheck) const
bool isMemset() const
bool isMemcpy() const
bool isMemcpyWithFixedDstAlign() const
bool isZeroMemset() const
Align getSrcAlign() const
A simple container for information about the supported runtime calls.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
std::optional< unsigned > fallbackAddressSpace
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
This contains information for each constraint that we are lowering.
AsmOperandInfo(InlineAsm::ConstraintInfo Info)
Copy constructor for copying from a ConstraintInfo.
MVT ConstraintVT
The ValueType for the operand value.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCallee(Type *ResultType, FunctionType *FTy, SDValue Target, ArgListTy &&ArgsList, const CallBase &Call)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
CallLoweringInfo & setInRegister(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setVarArg(bool Value=true)
Type * OrigRetTy
Original unlegalized return type.
std::optional< PtrAuthInfo > PAI
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, Type *OrigResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
MakeLibCallOptions & setDiscardResult(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT)
MakeLibCallOptions & setIsSigned(bool Value=true)
MakeLibCallOptions & setNoReturn(bool Value=true)
MakeLibCallOptions & setOpsTypeOverrides(ArrayRef< Type * > OpsTypes)
Override the argument type for an operand.
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)