//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize,   // Linearize DAG, no scheduling
  Last = Linearize // Marker for the last Sched::Preference
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
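
// Illustrative usage (editor's sketch, not part of the original header):
// modelling a 16-byte, non-volatile memcpy whose destination alignment is
// fixed at 4 bytes and whose source is 8-byte aligned, using only the
// factory functions and queries declared above.
//
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(4), /*SrcAlign=*/Align(8),
//                          /*IsVolatile=*/false);
//   Op.isMemcpy();          // true
//   Op.allowOverlap();      // true: the operation is not volatile
//   Op.isAligned(Align(4)); // true: source and destination are both
//                           // at least 4-byte aligned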

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,    // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into load-linked/store-conditional;
                   // used by ARM/AArch64.
    LLOnly,        // Expand the (load) instruction into just a load-linked,
                   // which has greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                      // operations; used by X86.
    CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                       // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
  /// (setcc ...)).
  enum AndOrSETCCFoldKind : uint8_t {
    None = 0,   // No fold is preferable.
    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
    ABS = 4,    // Fold with `llvm.abs` op is preferable.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsNoExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsNoExt(false), IsInReg(false),
          IsSRet(false), IsNest(false), IsByVal(false), IsByRef(false),
          IsInAlloca(false), IsPreallocated(false), IsReturned(false),
          IsSwiftSelf(false), IsSwiftAsync(false), IsSwiftError(false),
          IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted amount
  /// type. Targets should return a legal type if the input type is legal.
  /// Targets can return a type that is too small if the input type is illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, calls getScalarShiftAmountTy.
  /// If getScalarShiftAmountTy type cannot represent all possible shift
  /// amounts, returns MVT::i32.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  /// This callback is used to inspect load/store SDNode.
  /// The default implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.experimental.vector.partial.reduce.* intrinsic
  /// should be expanded using generic code in SelectionDAGBuilder.
  virtual bool
  shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
  /// using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandCttzElements(EVT VT) const { return true; }

  /// Return the minimum number of bits required to hold the maximum possible
  /// number of trailing zero vector elements.
  unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC,
                                      bool ZeroIsPoison,
                                      const ConstantRange *VScaleRange) const;

  /// Return true if the @llvm.experimental.vector.match intrinsic should be
  /// expanded for vector type `VT' and search size `SearchSize' using generic
  /// code in SelectionDAGBuilder.
  virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const {
    return true;
  }

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual LegalizeTypeAction getPreferredVectorAction(MVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }
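
  // For illustration (editor's note, not part of the original header): under
  // these default rules, MVT::v1i64 is scalarized to i64, MVT::v3i32 is
  // widened to v4i32, and a power-of-two vector such as MVT::v4i16 falls
  // through to integer promotion.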

  // Return true if the half type should be promoted using soft promotion rules
  // where each operation is promoted to f32 individually, then converted to
  // fp16. The default behavior is to promote chains of operations, keeping
  // intermediate results in f32 precision and range.
  virtual bool softPromoteHalfType() const { return false; }

  // Return true if, for soft-promoted half, the half type should be passed
  // to and returned from functions as f32. The default behavior is to
  // pass as i16. If soft-promoted half is not used, this function is ignored
  // and values are always passed and returned as f32.
  virtual bool useFPRegsForHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  // Costs parameters used by
  // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
  // shouldKeepJumpConditionsTogether will use these parameter values to
  // determine if two conditions in the form `br (and/or cond1, cond2)` should
  // be split into two branches or left as one.
  //
  // BaseCost is the cost threshold (in latency). If the estimated latency of
  // computing both `cond1` and `cond2` is below the cost of just computing
  // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
  // they will be split.
  //
  // LikelyBias increases BaseCost if branch probability info indicates that it
  // is likely that both `cond1` and `cond2` will be computed.
  //
  // UnlikelyBias decreases BaseCost if branch probability info indicates that
  // it is unlikely that both `cond1` and `cond2` will be computed.
  //
  // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
  // `shouldKeepJumpConditionsTogether` always returning false).
  struct CondMergingParams {
    int BaseCost;
    int LikelyBias;
    int UnlikelyBias;
  };
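
  // Worked example (editor's sketch, not part of the original header): with
  // BaseCost = 2, if `cond1` alone is estimated at 3 cycles and `cond1` plus
  // `cond2` together at 4 cycles, the merged branch is kept because
  // 4 <= 3 + 2. Profile info then nudges the threshold up via LikelyBias or
  // down via UnlikelyBias before that comparison is made.
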
  // Return params for deciding if we should keep two branch conditions merged
  // or split them into two separate branches.
  // Arg0: The binary op joining the two conditions (and/or).
  // Arg1: The first condition (cond1)
  // Arg2: The second condition (cond2)
  virtual CondMergingParams
  getJumpConditionMergingParams(Instruction::BinaryOps, const Value *,
                                const Value *) const {
    // -1 will always result in splitting.
    return {-1, -1, -1};
  }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in the dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of vector
  /// constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if ctpop instruction is fast.
  virtual bool isCtpopFast(EVT VT) const {
    return isOperationLegal(ISD::CTPOP, VT);
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in code
  /// gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
  virtual bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const {
    return true;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x &  (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  // Return true if it is desirable to perform the following transform:
  //   (fmul C, (uitofp Pow2))
  //     -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //   (fdiv C, (uitofp Pow2))
  //     -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //
  // This is only queried after we have verified the transform will be bitwise
  // equal.
  //
  // SDNode *N      : The FDiv/FMul node we want to transform.
  // SDValue FPConst: The Float constant operand in `N`.
  // SDValue IntPow2: The Integer power of 2 operand in `N`.
  virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
                                                   SDValue IntPow2) const {
    // Default to avoiding fdiv which is often very expensive.
    return N->getOpcode() == ISD::FDIV;
  }

  // Given:
  //   (icmp eq/ne (and X, C0), (shift X, C1))
  // or
  //   (icmp eq/ne X, (rotate X, CPow2))
  //
  // If C0 is a mask or shifted mask and the shift amt (C1) isolates the
  // remaining bits (i.e. something like `(x64 & UINT32_MAX) == (x64 >> 32)`),
  // do we prefer the shift to be shift-right, shift-left, or rotate.
  // Note: It's only valid to convert the rotate version to the shift version
  // iff the shift-amt (`C1`) is a power of 2 (including 0).
  // If ShiftOpc (current Opcode) is returned, do nothing.
  virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(
      EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
      const APInt &ShiftOrRotateAmt,
      const std::optional<APInt> &AndMask) const {
    return ShiftOpc;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two adds is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two adds.
    return true;
  }

  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
  // may want to avoid this to prevent loss of the sub nsw pattern.
  virtual bool preferABDSToABSWithNSW(EVT VT) const {
    return true;
  }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }

  // Return true if the target wants to transform:
  //   (TruncVT truncate(sext_in_reg(VT X, ExtVT))
  //     -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
  // Some targets might prefer pre-sextinreg to improve truncation/saturation.
  virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if the target shall perform extract vector element and store
  /// given that the vector is known to be splat of constant.
  /// \p Index[out] gives the index of the vector element to be extracted when
  /// this is true.
  virtual bool shallExtractConstSplatVectorElementToStore(
      Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the bits
  /// of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall,
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
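
  // For example (editor's note, not part of the original header): on a target
  // whose widest legal integer type is i64, getTypeToExpandTo(Context, i256)
  // walks i256 -> i128 -> i64, one halving per TypeExpandInteger step, and
  // returns i64, the largest legal type the value is expanded to.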

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }
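
  // Usage sketch (editor's illustration, not part of the original header): on
  // a target where v4f32 is legal, asking for the breakdown of v8f32 sets
  // IntermediateVT = v4f32, NumIntermediates = 2, RegisterVT = v4f32, and
  // returns 2, i.e. the value is carried in two v4f32 registers.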

  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    if (VT.isExtended())
      return Expand;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }
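
  // Example (editor's illustration, not part of the original header): a
  // target that implements ISD::SMULFIX on i32 only for scales 0..15 would
  // override isSupportedFixedPointOperation() to check the scale, so
  // getFixedPointOperationAction() returns Expand for a requested scale of
  // 31 even though the plain operation is Legal on i32.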

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
    default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
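
  // Worked example (editor's note, not part of the original header): with a
  // 64-bit index type, Low = 10 and High = 70 give
  // Range = 70 - 10 + 1 = 61 <= 64, so the range fits in a machine word;
  // Low = 0 and High = 100 give Range = 101, which does not.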

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases and a range of \p Range
  /// values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns preferred type for switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
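
  // Example (editor's sketch, not part of the original header): cases
  // {0,2,4,6} -> BB1 and {1,3,5,7} -> BB2 give NumDests = 2, NumCmps = 8,
  // Low = 0, High = 7. The range fits in a word and 8 >= 5, so the cluster
  // qualifies for two bit-test-and-branch sequences plus a single
  // range-check branch.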

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
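
  // Editor's note on the encoding (not part of the original header):
  // LoadExtActions packs one 4-bit LegalizeAction per extension type into a
  // single table entry, so assuming ISD::SEXTLOAD's enumerator value of 2,
  // its action occupies bits [8, 11] of LoadExtActions[ValI][MemI].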

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Same as getLoadExtAction, but for atomic loads.
  LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT,
                                        EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    LegalizeAction Action =
        (LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
    assert((Action == Legal || Action == Expand) &&
           "Unsupported atomic load extension action.");
    return Action;
  }

  /// Return true if the specified atomic load with extension is legal on
  /// this target.
  bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getAtomicLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }
1499
1500 /// Return how this store with truncation should be treated: either it is
1501 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1502 /// other code sequence, or the target has a custom expander for it.
1504 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1505 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1506 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1508 "Table isn't big enough!");
1509 return TruncStoreActions[ValI][MemI];
1510 }
1511
1512 /// Return true if the specified store with truncation is legal on this
1513 /// target.
1514 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1515 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1516 }
1517
1518 /// Return true if the specified store with truncation is legal or has a
1519 /// custom lowering on this target.
1520 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1521 return isTypeLegal(ValVT) &&
1522 (getTruncStoreAction(ValVT, MemVT) == Legal ||
1523 getTruncStoreAction(ValVT, MemVT) == Custom);
1524 }
1525
1526 virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
1527 bool LegalOnly) const {
1528 if (LegalOnly)
1529 return isTruncStoreLegal(ValVT, MemVT);
1530
1531 return isTruncStoreLegalOrCustom(ValVT, MemVT);
1532 }
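 // Illustrative sketch: a combine that wants to store only the low i32 bits
 // of a truncated i64 value would first ask whether the truncating store can
 // be selected:
 //
 //   if (TLI.isTruncStoreLegalOrCustom(MVT::i64, MVT::i32))
 //     ; // emit one truncating store instead of an explicit trunc + store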
1533
1534 /// Return how the indexed load should be treated: either it is legal, needs
1535 /// to be promoted to a larger size, needs to be expanded to some other code
1536 /// sequence, or the target has a custom expander for it.
1537 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1538 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1539 }
1540
1541 /// Return true if the specified indexed load is legal on this target.
1542 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1543 return VT.isSimple() &&
1544 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1545 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1546 }
1547
1548 /// Return how the indexed store should be treated: either it is legal, needs
1549 /// to be promoted to a larger size, needs to be expanded to some other code
1550 /// sequence, or the target has a custom expander for it.
1551 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1552 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1553 }
1554
1555 /// Return true if the specified indexed store is legal on this target.
1556 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1557 return VT.isSimple() &&
1558 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1559 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1560 }
1561
1562 /// Return how the indexed masked load should be treated: either it is legal, needs
1563 /// to be promoted to a larger size, needs to be expanded to some other code
1564 /// sequence, or the target has a custom expander for it.
1565 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1566 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1567 }
1568
1569 /// Return true if the specified indexed masked load is legal on this target.
1570 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1571 return VT.isSimple() &&
1572 (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1573 getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1574 }
1575
1576 /// Return how the indexed masked store should be treated: either it is legal, needs
1577 /// to be promoted to a larger size, needs to be expanded to some other code
1578 /// sequence, or the target has a custom expander for it.
1579 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1580 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1581 }
1582
1583 /// Return true if the specified indexed masked store is legal on this target.
1584 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1585 return VT.isSimple() &&
1586 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1587 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1588 }
1589
1590 /// Returns true if the index type for a masked gather/scatter requires
1591 /// extending.
1592 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1593
1594 // Returns true if Extend can be folded into the index of a masked
1595 // gather/scatter on this target.
1596 virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const {
1597 return false;
1598 }
1599
1600 // Return true if the target supports a scatter/gather instruction with
1601 // indices which are scaled by the particular value. Note that all targets
1602 // must by definition support a scale of 1.
1603 virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
1604 uint64_t ElemSize) const {
1605 // MGATHER/MSCATTER are only required to support scaling by one or by the
1606 // element size.
1607 if (Scale != ElemSize && Scale != 1)
1608 return false;
1609 return true;
1610 }
1611
1612 /// Return how the condition code should be treated: either it is legal, needs
1613 /// to be expanded to some other code sequence, or the target has a custom
1614 /// expander for it.
1615 LegalizeAction
1616 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1617 assert((unsigned)CC < std::size(CondCodeActions) &&
1618 ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1619 "Table isn't big enough!");
1620 // See setCondCodeAction for how this is encoded.
1621 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1622 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1623 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1624 assert(Action != Promote && "Can't promote condition code!");
1625 return Action;
1626 }
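 // Worked example of the encoding above (values chosen for illustration):
 // for VT.SimpleTy == 10, the entry lives in CondCodeActions[CC][10 >> 3]
 // == CondCodeActions[CC][1], at bit offset 4 * (10 & 0x7) == 8, so the
 // action is the 4-bit field (Value >> 8) & 0xF.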
1627
1628 /// Return true if the specified condition code is legal for a comparison of
1629 /// the specified types on this target.
1630 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1631 return getCondCodeAction(CC, VT) == Legal;
1632 }
1633
1634 /// Return true if the specified condition code is legal or custom for a
1635 /// comparison of the specified types on this target.
1636 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1637 return getCondCodeAction(CC, VT) == Legal ||
1638 getCondCodeAction(CC, VT) == Custom;
1639 }
1640
1641 /// If the action for this operation is to promote, this method returns the
1642 /// ValueType to promote to.
1643 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1644 assert(getOperationAction(Op, VT) == Promote &&
1645 "This operation isn't promoted!");
1646
1647 // See if this has an explicit type specified.
1648 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1649 MVT::SimpleValueType>::const_iterator PTTI =
1650 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1651 if (PTTI != PromoteToType.end()) return PTTI->second;
1652
1653 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1654 "Cannot autopromote this type, add it with AddPromotedToType.");
1655
1656 uint64_t VTBits = VT.getScalarSizeInBits();
1657 MVT NVT = VT;
1658 do {
1659 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1660 assert(NVT.isInteger() == VT.isInteger() &&
1661 NVT.isFloatingPoint() == VT.isFloatingPoint() &&
1662 "Didn't find type to promote to!");
1663 } while (VTBits >= NVT.getScalarSizeInBits() || !isTypeLegal(NVT) ||
1664 getOperationAction(Op, NVT) == Promote);
1665 return NVT;
1666 }
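 // Illustrative sketch (hypothetical target): a backend that only implements
 // ISD::CTLZ on i32 could promote the i16 form and let the lookup above find
 // the destination type:
 //
 //   setOperationAction(ISD::CTLZ, MVT::i16, Promote);
 //   AddPromotedToType(ISD::CTLZ, MVT::i16, MVT::i32);
 //   // getTypeToPromoteTo(ISD::CTLZ, MVT::i16) now returns MVT::i32.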
1667
1668 virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
1669 bool AllowUnknown = false) const {
1670 return getValueType(DL, Ty, AllowUnknown);
1671 }
1672
1673 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1674 /// operations except for the pointer size. If AllowUnknown is true, this
1675 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1676 /// otherwise it will assert.
1677 EVT getValueType(const DataLayout &DL, Type *Ty,
1678 bool AllowUnknown = false) const {
1679 // Lower scalar pointers to native pointer types.
1680 if (auto *PTy = dyn_cast<PointerType>(Ty))
1681 return getPointerTy(DL, PTy->getAddressSpace());
1682
1683 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1684 Type *EltTy = VTy->getElementType();
1685 // Lower vectors of pointers to native pointer types.
1686 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1687 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1688 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1689 }
1690 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1691 VTy->getElementCount());
1692 }
1693
1694 return EVT::getEVT(Ty, AllowUnknown);
1695 }
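 // Illustrative sketch: for a DataLayout with 64-bit pointers, the mapping
 // performed above yields, e.g.:
 //
 //   getValueType(DL, PointerType::get(Ctx, 0));   // -> MVT::i64
 //   getValueType(DL, Type::getFloatTy(Ctx));      // -> MVT::f32
 //   // and <4 x ptr> becomes a vector of the native pointer type, v4i64.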
1696
1697 EVT getMemValueType(const DataLayout &DL, Type *Ty,
1698 bool AllowUnknown = false) const {
1699 // Lower scalar pointers to native pointer types.
1700 if (auto *PTy = dyn_cast<PointerType>(Ty))
1701 return getPointerMemTy(DL, PTy->getAddressSpace());
1702
1703 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1704 Type *EltTy = VTy->getElementType();
1705 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1706 EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
1707 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1708 }
1709 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1710 VTy->getElementCount());
1711 }
1712
1713 return getValueType(DL, Ty, AllowUnknown);
1714 }
1715
1716
1717 /// Return the MVT corresponding to this LLVM type. See getValueType.
1718 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1719 bool AllowUnknown = false) const {
1720 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1721 }
1722
1723 /// Returns the desired alignment for ByVal or InAlloca aggregate function
1724 /// arguments in the caller parameter area.
1725 virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1726
1727 /// Return the type of registers that this ValueType will eventually require.
1728 virtual MVT getRegisterType(MVT VT) const {
1729 assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1730 return RegisterTypeForVT[VT.SimpleTy];
1731 }
1732
1733 /// Return the type of registers that this ValueType will eventually require.
1734 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1735 if (VT.isSimple())
1736 return getRegisterType(VT.getSimpleVT());
1737 if (VT.isVector()) {
1738 EVT VT1;
1739 MVT RegisterVT;
1740 unsigned NumIntermediates;
1741 (void)getVectorTypeBreakdown(Context, VT, VT1,
1742 NumIntermediates, RegisterVT);
1743 return RegisterVT;
1744 }
1745 if (VT.isInteger()) {
1746 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1747 }
1748 llvm_unreachable("Unsupported extended type!");
1749 }
1750
1751 /// Return the number of registers that this ValueType will eventually
1752 /// require.
1753 ///
1754 /// This is one for any types promoted to live in larger registers, but may be
1755 /// more than one for types (like i64) that are split into pieces. For types
1756 /// like i140, which are first promoted then expanded, it is the number of
1757 /// registers needed to hold all the bits of the original type. For an i140
1758 /// on a 32 bit machine this means 5 registers.
1759 ///
1760 /// RegisterVT may be passed as a way to override the default settings, for
1761 /// instance with i128 inline assembly operands on SystemZ.
1762 virtual unsigned
1763 getNumRegisters(LLVMContext &Context, EVT VT,
1764 std::optional<MVT> RegisterVT = std::nullopt) const {
1765 if (VT.isSimple()) {
1766 assert((unsigned)VT.getSimpleVT().SimpleTy <
1767 std::size(NumRegistersForVT));
1768 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1769 }
1770 if (VT.isVector()) {
1771 EVT VT1;
1772 MVT VT2;
1773 unsigned NumIntermediates;
1774 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1775 }
1776 if (VT.isInteger()) {
1777 unsigned BitWidth = VT.getSizeInBits();
1778 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1779 return (BitWidth + RegWidth - 1) / RegWidth;
1780 }
1781 llvm_unreachable("Unsupported extended type!");
1782 }
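 // Worked example of the integer path above: for i140 on a target whose
 // widest legal integer register is i32, getRegisterType returns i32, so
 // BitWidth = 140, RegWidth = 32, and (140 + 32 - 1) / 32 == 5 registers,
 // matching the i140 example in the comment.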
1783
1784 /// Certain combinations of ABIs, Targets and features require that types
1785 /// are legal for some operations and not for other operations.
1786 /// For MIPS all vector types must be passed through the integer register set.
1787 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1788 CallingConv::ID CC, EVT VT) const {
1789 return getRegisterType(Context, VT);
1790 }
1791
1792 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1793 /// this occurs when a vector type is used, as vectors are passed through the
1794 /// integer register set.
1795 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1796 CallingConv::ID CC,
1797 EVT VT) const {
1798 return getNumRegisters(Context, VT);
1799 }
1800
1801 /// Certain targets have context sensitive alignment requirements, where one
1802 /// type has the alignment requirement of another type.
1803 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1804 const DataLayout &DL) const {
1805 return DL.getABITypeAlign(ArgTy);
1806 }
1807
1808 /// If true, then instruction selection should seek to shrink the FP constant
1809 /// of the specified type to a smaller type in order to save space and / or
1810 /// reduce runtime.
1811 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1812
1813 /// Return true if it is profitable to reduce a load to a smaller type.
1814 /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
1815 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1816 EVT NewVT) const {
1817 // By default, assume that it is cheaper to extract a subvector from a wide
1818 // vector load rather than creating multiple narrow vector loads.
1819 if (NewVT.isVector() && !Load->hasOneUse())
1820 return false;
1821
1822 return true;
1823 }
1824
1825 /// Return true (the default) if it is profitable to remove a sext_inreg(x)
1826 /// where the sext is redundant, and use x directly.
1827 virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }
1828
1829 /// Indicates if any padding is guaranteed to go at the most significant bits
1830 /// when storing the type to memory and the type size isn't equal to the store
1831 /// size.
1832 virtual bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const {
1833 return VT.isScalarInteger() && !VT.isByteSized();
1834 }
1835
1836 /// When splitting a value of the specified type into parts, does the Lo
1837 /// or Hi part come first? This usually follows the endianness, except
1838 /// for ppcf128, where the Hi part always comes first.
1839 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1840 return DL.isBigEndian() || VT == MVT::ppcf128;
1841 }
1842
1843 /// If true, the target has custom DAG combine transformations that it can
1844 /// perform for the specified node.
1845 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1846 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1847 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1848 }
1849
1850 unsigned getGatherAllAliasesMaxDepth() const {
1851 return GatherAllAliasesMaxDepth;
1852 }
1853
1854 /// Returns the size of the platform's va_list object.
1855 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1856 return getPointerTy(DL).getSizeInBits();
1857 }
1858
1859 /// Get maximum # of store operations permitted for llvm.memset
1860 ///
1861 /// This function returns the maximum number of store operations permitted
1862 /// to replace a call to llvm.memset. The value is set by the target at the
1863 /// performance threshold for such a replacement. If OptSize is true,
1864 /// return the limit for functions that have OptSize attribute.
1865 unsigned getMaxStoresPerMemset(bool OptSize) const {
1866 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1867 }
1868
1869 /// Get maximum # of store operations permitted for llvm.memcpy
1870 ///
1871 /// This function returns the maximum number of store operations permitted
1872 /// to replace a call to llvm.memcpy. The value is set by the target at the
1873 /// performance threshold for such a replacement. If OptSize is true,
1874 /// return the limit for functions that have OptSize attribute.
1875 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1876 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1877 }
1878
1879 /// \brief Get maximum # of store operations to be glued together
1880 ///
1881 /// This function returns the maximum number of store operations permitted
1882 /// to glue together during lowering of llvm.memcpy. The value is set by
1883 /// the target at the performance threshold for such a replacement.
1884 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1885 return MaxGluedStoresPerMemcpy;
1886 }
1887
1888 /// Get maximum # of load operations permitted for memcmp
1889 ///
1890 /// This function returns the maximum number of load operations permitted
1891 /// to replace a call to memcmp. The value is set by the target at the
1892 /// performance threshold for such a replacement. If OptSize is true,
1893 /// return the limit for functions that have OptSize attribute.
1894 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1895 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1896 }
1897
1898 /// Get maximum # of store operations permitted for llvm.memmove
1899 ///
1900 /// This function returns the maximum number of store operations permitted
1901 /// to replace a call to llvm.memmove. The value is set by the target at the
1902 /// performance threshold for such a replacement. If OptSize is true,
1903 /// return the limit for functions that have OptSize attribute.
1904 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1905 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1906 }
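 // Illustrative sketch (hypothetical limits): a target tunes these members
 // in its TargetLowering constructor, and the getters above fold in the
 // OptSize variants:
 //
 //   MaxStoresPerMemset = 16;        // aggressive when optimizing for speed
 //   MaxStoresPerMemsetOptSize = 4;  // conservative under -Os
 //   // getMaxStoresPerMemset(/*OptSize=*/true) then returns 4.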
1907
1908 /// Determine if the target supports unaligned memory accesses.
1909 ///
1910 /// This function returns true if the target allows unaligned memory accesses
1911 /// of the specified type in the given address space. If true, it also returns
1912 /// a relative speed of the unaligned memory access in the last argument by
1913 /// reference. The higher the speed number the faster the operation compared
1914 /// to a number returned by another such call. This is used, for example, in
1915 /// situations where an array copy/move/set is converted to a sequence of
1916 /// store operations. Its use helps to ensure that such replacements don't
1917 /// generate code that causes an alignment error (trap) on the target machine.
1918 virtual bool allowsMisalignedMemoryAccesses(
1919 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1920 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1921 unsigned * /*Fast*/ = nullptr) const {
1922 return false;
1923 }
1924
1925 /// LLT handling variant.
1926 virtual bool allowsMisalignedMemoryAccesses(
1927 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1928 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1929 unsigned * /*Fast*/ = nullptr) const {
1930 return false;
1931 }
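 // Illustrative sketch (hypothetical target): an override that accepts any
 // misaligned scalar-integer access but reports it as slower than aligned:
 //
 //   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
 //                                       MachineMemOperand::Flags,
 //                                       unsigned *Fast) const override {
 //     if (!VT.isScalarInteger())
 //       return false;
 //     if (Fast)
 //       *Fast = 1; // non-zero: allowed, relative speed 1
 //     return true;
 //   }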
1932
1933 /// This function returns true if the memory access is aligned or if the
1934 /// target allows this specific unaligned memory access. If the access is
1935 /// allowed, the optional final parameter returns a relative speed of the
1936 /// access (as defined by the target).
1937 bool allowsMemoryAccessForAlignment(
1938 LLVMContext &Context, const DataLayout &DL, EVT VT,
1939 unsigned AddrSpace = 0, Align Alignment = Align(1),
1940 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1941 unsigned *Fast = nullptr) const;
1942
1943 /// Return true if the memory access of this type is aligned or if the target
1944 /// allows this specific unaligned access for the given MachineMemOperand.
1945 /// If the access is allowed, the optional final parameter returns a relative
1946 /// speed of the access (as defined by the target).
1947 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1948 const DataLayout &DL, EVT VT,
1949 const MachineMemOperand &MMO,
1950 unsigned *Fast = nullptr) const;
1951
1952 /// Return true if the target supports a memory access of this type for the
1953 /// given address space and alignment. If the access is allowed, the optional
1954 /// final parameter returns the relative speed of the access (as defined by
1955 /// the target).
1956 virtual bool
1957 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1958 unsigned AddrSpace = 0, Align Alignment = Align(1),
1959 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1960 unsigned *Fast = nullptr) const;
1961
1962 /// Return true if the target supports a memory access of this type for the
1963 /// given MachineMemOperand. If the access is allowed, the optional
1964 /// final parameter returns the relative access speed (as defined by the
1965 /// target).
1966 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1967 const MachineMemOperand &MMO,
1968 unsigned *Fast = nullptr) const;
1969
1970 /// LLT handling variant.
1971 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1972 const MachineMemOperand &MMO,
1973 unsigned *Fast = nullptr) const;
1974
1975 /// Returns the target specific optimal type for load and store operations as
1976 /// a result of memset, memcpy, and memmove lowering.
1977 /// It returns EVT::Other if the type should be determined using generic
1978 /// target-independent logic.
1979 virtual EVT
1980 getOptimalMemOpType(const MemOp &Op,
1981 const AttributeList & /*FuncAttributes*/) const {
1982 return MVT::Other;
1983 }
1984
1985 /// LLT returning variant.
1986 virtual LLT
1987 getOptimalMemOpLLT(const MemOp &Op,
1988 const AttributeList & /*FuncAttributes*/) const {
1989 return LLT();
1990 }
1991
1992 /// Returns true if it's safe to use load / store of the specified type to
1993 /// expand memcpy / memset inline.
1994 ///
1995 /// This is mostly true for all types except for some special cases. For
1996 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1997 /// fstpl which also does type conversion. Note the specified type doesn't
1998 /// have to be legal as the hook is used before type legalization.
1999 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
2000
2001 /// Return lower limit for number of blocks in a jump table.
2002 virtual unsigned getMinimumJumpTableEntries() const;
2003
2004 /// Return lower limit of the density in a jump table.
2005 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
2006
2007 /// Return upper limit for number of entries in a jump table.
2008 /// Zero if no limit.
2009 unsigned getMaximumJumpTableSize() const;
2010
2011 virtual bool isJumpTableRelative() const;
2012
2013 /// If a physical register, this specifies the register that
2014 /// llvm.savestack/llvm.restorestack should save and restore.
2015 Register getStackPointerRegisterToSaveRestore() const {
2016 return StackPointerRegisterToSaveRestore;
2017 }
2018
2019 /// If a physical register, this returns the register that receives the
2020 /// exception address on entry to an EH pad.
2021 virtual Register
2022 getExceptionPointerRegister(const Constant *PersonalityFn) const {
2023 return Register();
2024 }
2025
2026 /// If a physical register, this returns the register that receives the
2027 /// exception typeid on entry to a landing pad.
2028 virtual Register
2029 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
2030 return Register();
2031 }
2032
2033 virtual bool needsFixedCatchObjects() const {
2034 report_fatal_error("Funclet EH is not implemented for this target");
2035 }
2036
2037 /// Return the minimum stack alignment of an argument.
2038 Align getMinStackArgumentAlignment() const {
2039 return MinStackArgumentAlignment;
2040 }
2041
2042 /// Return the minimum function alignment.
2043 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
2044
2045 /// Return the preferred function alignment.
2046 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
2047
2048 /// Return the preferred loop alignment.
2049 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
2050
2051 /// Return the maximum amount of bytes allowed to be emitted when padding for
2052 /// alignment
2053 virtual unsigned
2054 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
2055
2056 /// Should loops be aligned even when the function is marked OptSize (but not
2057 /// MinSize).
2058 virtual bool alignLoopsWithOptSize() const { return false; }
2059
2060 /// If the target has a standard location for the stack protector guard,
2061 /// returns the address of that location. Otherwise, returns nullptr.
2062 /// DEPRECATED: please override useLoadStackGuardNode and customize
2063 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
2064 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
2065
2066 /// Inserts necessary declarations for SSP (stack protection) purpose.
2067 /// Should be used only when getIRStackGuard returns nullptr.
2068 virtual void insertSSPDeclarations(Module &M) const;
2069
2070 /// Return the variable that's previously inserted by insertSSPDeclarations,
2071 /// if any, otherwise return nullptr. Should be used only when
2072 /// getIRStackGuard returns nullptr.
2073 virtual Value *getSDagStackGuard(const Module &M) const;
2074
2075 /// If this function returns true, stack protection checks should XOR the
2076 /// frame pointer (or whichever pointer is used to address locals) into the
2077 /// stack guard value before checking it. getIRStackGuard must return nullptr
2078 /// if this returns true.
2079 virtual bool useStackGuardXorFP() const { return false; }
2080
2081 /// If the target has a standard stack protection check function that
2082 /// performs validation and error handling, returns the function. Otherwise,
2083 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
2084 /// Should be used only when getIRStackGuard returns nullptr.
2085 virtual Function *getSSPStackGuardCheck(const Module &M) const;
2086
2087protected:
2088 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
2089 bool UseTLS) const;
2090
2091public:
2092 /// Returns the target-specific address of the unsafe stack pointer.
2093 virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
2094
2095 /// Returns true if the target emits a symbol to call for stack probing;
2096 /// see getStackProbeSymbolName below for the symbol itself.
2097 virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }
2098
2099 virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }
2100
2101 virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
2102 return "";
2103 }
2104
2105 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
2106 /// are happy to sink it into basic blocks. A cast may be free, but not
2107 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
2108 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
2109
2110 /// Return true if the pointer arguments to CI should be aligned by aligning
2111 /// the object whose address is being passed. If so then MinSize is set to the
2112 /// minimum size the object must be to be aligned and PrefAlign is set to the
2113 /// preferred alignment.
2114 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
2115 Align & /*PrefAlign*/) const {
2116 return false;
2117 }
2118
2119 //===--------------------------------------------------------------------===//
2120 /// \name Helpers for TargetTransformInfo implementations
2121 /// @{
2122
2123 /// Get the ISD node that corresponds to the Instruction class opcode.
2124 int InstructionOpcodeToISD(unsigned Opcode) const;
2125
2126 /// @}
2127
2128 //===--------------------------------------------------------------------===//
2129 /// \name Helpers for atomic expansion.
2130 /// @{
2131
2132 /// Returns the maximum atomic operation size (in bits) supported by
2133 /// the backend. Atomic operations greater than this size (as well
2134 /// as ones that are not naturally aligned) will be expanded by
2135 /// AtomicExpandPass into an __atomic_* library call.
2136 unsigned getMaxAtomicSizeInBitsSupported() const {
2137 return MaxAtomicSizeInBitsSupported;
2138 }
2139
2140 /// Returns the size in bits of the maximum div/rem the backend supports.
2141 /// Larger operations will be expanded by ExpandLargeDivRem.
2142 unsigned getMaxDivRemBitWidthSupported() const {
2143 return MaxDivRemBitWidthSupported;
2144 }
2145
2146 /// Returns the size in bits of the largest fp convert the backend
2147 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
2148 unsigned getMaxLargeFPConvertBitWidthSupported() const {
2149 return MaxLargeFPConvertBitWidthSupported;
2150 }
2151
2152 /// Returns the size of the smallest cmpxchg or ll/sc instruction
2153 /// the backend supports. Any smaller operations are widened in
2154 /// AtomicExpandPass.
2155 ///
2156 /// Note that *unlike* operations above the maximum size, atomic ops
2157 /// are still natively supported below the minimum; they just
2158 /// require a more complex expansion.
2159 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
2160
2161 /// Whether the target supports unaligned atomic operations.
2162 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
2163
2164 /// Whether AtomicExpandPass should automatically insert fences and reduce
2165 /// ordering for this atomic. This should be true for most architectures with
2166 /// weak memory ordering. Defaults to false.
2167 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
2168 return false;
2169 }
2170
2171 /// Whether AtomicExpandPass should automatically insert a trailing fence
2172 /// without reducing the ordering for this atomic. Defaults to false.
2173 virtual bool
2174 shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
2175 return false;
2176 }
2177
2178 /// Perform a load-linked operation on Addr, returning a "Value *" with the
2179 /// corresponding pointee type. This may entail some non-trivial operations to
2180 /// truncate or reconstruct types that will be illegal in the backend. See
2181 /// ARMISelLowering for an example implementation.
2182 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
2183 Value *Addr, AtomicOrdering Ord) const {
2184 llvm_unreachable("Load linked unimplemented on this target");
2185 }
2186
2187 /// Perform a store-conditional operation to Addr. Return the status of the
2188 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2189 virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
2190 Value *Addr, AtomicOrdering Ord) const {
2191 llvm_unreachable("Store conditional unimplemented on this target");
2192 }
2193
2194 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2195 /// represents the core LL/SC loop which will be lowered at a late stage by
2196 /// the backend. The target-specific intrinsic returns the loaded value and
2197 /// is not responsible for masking and shifting the result.
2198 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
2199 AtomicRMWInst *AI,
2200 Value *AlignedAddr, Value *Incr,
2201 Value *Mask, Value *ShiftAmt,
2202 AtomicOrdering Ord) const {
2203 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2204 }
2205
2206 /// Perform an atomicrmw expansion in a target-specific way. This is
2207 /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2208 /// work, and the target supports another way to lower atomicrmw.
2209 virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2210 llvm_unreachable(
2211 "Generic atomicrmw expansion unimplemented on this target");
2212 }
2213
2214 /// Perform a cmpxchg expansion using a target-specific method.
2215 virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const {
2216 llvm_unreachable("Generic cmpxchg expansion unimplemented on this target");
2217 }
2218
2219 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2220 /// represents the combined bit test intrinsic which will be lowered at a late
2221 /// stage by the backend.
2222 virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2223 llvm_unreachable(
2224 "Bit test atomicrmw expansion unimplemented on this target");
2225 }
2226
2227 /// Perform an atomicrmw whose result is only used by a comparison, using a
2228 /// target-specific intrinsic. This represents the combined atomic and compare
2229 /// intrinsic which will be lowered at a late stage by the backend.
2230 virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2231 llvm_unreachable(
2232 "Compare arith atomicrmw expansion unimplemented on this target");
2233 }
2234
2235 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2236 /// represents the core LL/SC loop which will be lowered at a late stage by
2237 /// the backend. The target-specific intrinsic returns the loaded value and
2238 /// is not responsible for masking and shifting the result.
2239 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
2240 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2241 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2242 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2243 }
2244
2245 //===--------------------------------------------------------------------===//
2246 /// \name KCFI check lowering.
2247 /// @{
2248
2249 virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
2250 MachineBasicBlock::instr_iterator &MBBI,
2251 const TargetInstrInfo *TII) const {
2252 llvm_unreachable("KCFI is not supported on this target");
2253 }
2254
2255 /// @}
2256
2257 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2258 /// It is called by AtomicExpandPass before expanding an
2259 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2260 /// if shouldInsertFencesForAtomic returns true.
2261 ///
2262 /// Inst is the original atomic instruction, prior to other expansions that
2263 /// may be performed.
2264 ///
2265 /// This function should either return a nullptr, or a pointer to an IR-level
2266 /// Instruction*. Even complex fence sequences can be represented by a
2267 /// single Instruction* through an intrinsic to be lowered later.
2268 ///
2269 /// The default implementation emits an IR fence before any release (or
2270 /// stronger) operation that stores, and after any acquire (or stronger)
2271 /// operation. This is generally a correct implementation, but backends may
2272 /// override if they wish to use alternative schemes (e.g. the PowerPC
2273 /// standard ABI uses a fence before a seq_cst load instead of after a
2274 /// seq_cst store).
2275 /// @{
2276 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2277 Instruction *Inst,
2278 AtomicOrdering Ord) const;
2279
2280 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2281 Instruction *Inst,
2282 AtomicOrdering Ord) const;
2283 /// @}
2284
2285 // Emits code that executes when the comparison result in the ll/sc
2286 // expansion of a cmpxchg instruction is such that the store-conditional will
2287 // not execute. This makes it possible to balance out the load-linked with
2288 // a dedicated instruction, if desired.
2289 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2290 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2291 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2292
2293 /// Returns true if arguments should be sign-extended in lib calls.
2294 virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const {
2295 return IsSigned;
2296 }
2297
2298 /// Returns true if arguments should be extended in lib calls.
2299 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2300 return true;
2301 }
2302
2303 /// Returns how the given (atomic) load should be expanded by the
2304 /// IR-level AtomicExpand pass.
2305 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2306 return AtomicExpansionKind::None;
2307 }
2308
2309 /// Returns how the given (atomic) load should be cast by the IR-level
2310 /// AtomicExpand pass.
2311 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2312 if (LI->getType()->isFloatingPointTy())
2313 return AtomicExpansionKind::CastToInteger;
2314 return AtomicExpansionKind::None;
2315 }
2316
2317 /// Returns how the given (atomic) store should be expanded by the IR-level
2318 /// AtomicExpand pass. For instance, AtomicExpansionKind::Expand will try
2319 /// to use an atomicrmw xchg.
2320 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2321 return AtomicExpansionKind::None;
2322 }
2323
2324 /// Returns how the given (atomic) store should be cast by the IR-level
2325 /// AtomicExpand pass. For instance, AtomicExpansionKind::CastToInteger
2326 /// will try to cast the operands to integer values.
2327 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2328 if (SI->getValueOperand()->getType()->isFloatingPointTy())
2329 return AtomicExpansionKind::CastToInteger;
2330 return AtomicExpansionKind::None;
2331 }
2332
2333 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2334 /// AtomicExpand pass.
2335 virtual AtomicExpansionKind
2336 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2337 return AtomicExpansionKind::None;
2338 }
2339
2340 /// Returns how the IR-level AtomicExpand pass should expand the given
2341 /// AtomicRMW, if at all. Default is to never expand.
2342 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2343 return RMW->isFloatingPointOperation() ?
2344 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2345 }
2346
2347 /// Returns how the given atomic atomicrmw should be cast by the IR-level
2348 /// AtomicExpand pass.
2349 virtual AtomicExpansionKind
2350 shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2351 if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2352 (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2353 RMWI->getValOperand()->getType()->isPointerTy()))
2354 return AtomicExpansionKind::CastToInteger;
2355
2356 return AtomicExpansionKind::None;
2357 }
2358
2359 /// On some platforms, an AtomicRMW that never actually modifies the value
2360 /// (such as fetch_add of 0) can be turned into a fence followed by an
2361 /// atomic load. This may sound useless, but it makes it possible for the
2362 /// processor to keep the cacheline shared, dramatically improving
2363 /// performance. And such idempotent RMWs are useful for implementing some
2364 /// kinds of locks, see for example (justification + benchmarks):
2365 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2366 /// This method tries doing that transformation, returning the atomic load if
2367 /// it succeeds, and nullptr otherwise.
2368 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2369 /// another round of expansion.
2370 virtual LoadInst *
2371 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2372 return nullptr;
2373 }
2374
2375 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2376 /// SIGN_EXTEND, or ANY_EXTEND).
2377 virtual ISD::NodeType getExtendForAtomicOps() const {
2378 return ISD::ZERO_EXTEND;
2379 }
2380
2381 /// Returns how the platform's atomic compare and swap expects its comparison
2382 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2383 /// separate from getExtendForAtomicOps, which is concerned with the
2384 /// sign-extension of the instruction's output, whereas here we are concerned
2385 /// with the sign-extension of the input. For targets with compare-and-swap
2386 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2387 /// the input can be ANY_EXTEND, but the output will still have a specific
2388 /// extension.
2389 virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2390 return ISD::ANY_EXTEND;
2391 }
2392
2393 /// @}
2394
2395 /// Returns true if we should normalize
2396 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2397 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2398 /// that it saves us from materializing N0 and N1 in an integer register.
2399 /// Targets that are able to perform and/or on flags should return false here.
2400 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2401 EVT VT) const {
2402 // If a target has multiple condition registers, then it likely has logical
2403 // operations on those registers.
2404 if (hasMultipleConditionRegisters())
2405 return false;
2406 // Only do the transform if the value won't be split into multiple
2407 // registers.
2408 LegalizeTypeAction Action = getTypeAction(Context, VT);
2409 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2410 Action != TypeSplitVector;
2411 }
2412
2413 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2414
2415 /// Return true if a select of constants (select Cond, C1, C2) should be
2416 /// transformed into simple math ops with the condition value. For example:
2417 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2418 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2419 return false;
2420 }
2421
2422 /// Return true if it is profitable to transform an integer
2423 /// multiplication-by-constant into simpler operations like shifts and adds.
2424 /// This may be true if the target does not directly support the
2425 /// multiplication operation for the specified type or the sequence of simpler
2426 /// ops is faster than the multiply.
2427 virtual bool decomposeMulByConstant(LLVMContext &Context,
2428 EVT VT, SDValue C) const {
2429 return false;
2430 }
2431
2432 /// Return true if it may be profitable to transform
2433 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2434 /// This may not be true if c1 and c2 can be represented as immediates but
2435 /// c1*c2 cannot, for example.
2436 /// The target should check if c1, c2 and c1*c2 can be represented as
2437 /// immediates, or have to be materialized into registers. If it is not sure
2438 /// about some cases, a default true can be returned to let the DAGCombiner
2439 /// decide.
2440 /// AddNode is (add x, c1), and ConstNode is c2.
2441 virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2442 SDValue ConstNode) const {
2443 return true;
2444 }
2445
2446 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2447 /// conversion operations - canonicalizing the FP source value instead of
2448 /// converting all cases and then selecting based on value.
2449 /// This may be true if the target throws exceptions for out of bounds
2450 /// conversions or has fast FP CMOV.
2451 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2452 bool IsSigned) const {
2453 return false;
2454 }
2455
2456 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2457 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2458 /// considered beneficial.
2459 /// If optimizing for size, expansion is only considered beneficial for up to
2460 /// 5 multiplies and a divide (if the exponent is negative).
2461 bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2462 if (Exponent < 0)
2463 Exponent = -Exponent;
2464 uint64_t E = static_cast<uint64_t>(Exponent);
2465 return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2466 }
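 // Worked example of the size heuristic above: for Exponent = 16,
 // popcount(16) + Log2_64(16) == 1 + 4 == 5 < 7, so expansion (four
 // squarings) is beneficial even at -Os; for Exponent = 255 the cost
 // 8 + 7 == 15 >= 7, so the @llvm.powi call is kept when optimizing for
 // size.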
2467
2468 //===--------------------------------------------------------------------===//
2469 // TargetLowering Configuration Methods - These methods should be invoked by
2470 // the derived class constructor to configure this object for the target.
2471 //
2472protected:
2473 /// Specify how the target extends the result of integer and floating point
2474 /// boolean values from i1 to a wider type. See getBooleanContents.
2475 void setBooleanContents(BooleanContent Ty) {
2476 BooleanContents = Ty;
2477 BooleanFloatContents = Ty;
2478 }
2479
2480 /// Specify how the target extends the result of integer and floating point
2481 /// boolean values from i1 to a wider type. See getBooleanContents.
2482 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2483 BooleanContents = IntTy;
2484 BooleanFloatContents = FloatTy;
2485 }
2486
2487 /// Specify how the target extends the result of a vector boolean value from a
2488 /// vector of i1 to a wider type. See getBooleanContents.
2489 void setBooleanVectorContents(BooleanContent Ty) {
2490 BooleanVectorContents = Ty;
2491 }
2492
2493 /// Specify the target scheduling preference.
2494 void setSchedulingPreference(Sched::Preference Pref) {
2495 SchedPreferenceInfo = Pref;
2496 }
2497
2498 /// Indicate the minimum number of blocks to generate jump tables.
2499 void setMinimumJumpTableEntries(unsigned Val);
2500
2501 /// Indicate the maximum number of entries in jump tables.
2502 /// Set to zero to generate unlimited jump tables.
2503 void setMaximumJumpTableSize(unsigned);
2504
2505 /// If set to a physical register, this specifies the register that
2506 /// llvm.savestack/llvm.restorestack should save and restore.
2507 void setStackPointerRegisterToSaveRestore(Register R) {
2508 StackPointerRegisterToSaveRestore = R;
2509 }
2510
2511 /// Tells the code generator that the target has multiple (allocatable)
2512 /// condition registers that can be used to store the results of comparisons
2513 /// for use by selects and conditional branches. With multiple condition
2514 /// registers, the code generator will not aggressively sink comparisons into
2515 /// the blocks of their users.
2516 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2517 HasMultipleConditionRegisters = hasManyRegs;
2518 }
2519
2520 /// Tells the code generator that the target has BitExtract instructions.
2521 /// The code generator will aggressively sink "shift"s into the blocks of
2522 /// their users if the users will generate "and" instructions which can be
2523 /// combined with "shift" to BitExtract instructions.
2524 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2525 HasExtractBitsInsn = hasExtractInsn;
2526 }
2527
2528 /// Tells the code generator not to expand logic operations on comparison
2529 /// predicates into separate sequences that increase the amount of flow
2530 /// control.
2531 void setJumpIsExpensive(bool isExpensive = true);
2532
2533 /// Tells the code generator which bitwidths to bypass.
2534 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2535 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2536 }
2537
2538 /// Add the specified register class as an available regclass for the
2539 /// specified value type. This indicates the selector can handle values of
2540 /// that class natively.
2541 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2542 assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2543 RegClassForVT[VT.SimpleTy] = RC;
2544 }
2545
2546 /// Return the largest legal super-reg register class of the register class
2547 /// for the specified type and its associated "cost".
2548 virtual std::pair<const TargetRegisterClass *, uint8_t>
2549 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2550
2551 /// Once all of the register classes are added, this allows us to compute
2552 /// derived properties we expose.
2553 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2554
2555 /// Indicate that the specified operation does not work with the specified
2556 /// type and indicate what to do about it. Note that VT may refer to either
2557 /// the type of a result or that of an operand of Op.
2558 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2559 assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2560 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2561 }
2562 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2563 LegalizeAction Action) {
2564 for (auto Op : Ops)
2565 setOperationAction(Op, VT, Action);
2566 }
2567 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2568 LegalizeAction Action) {
2569 for (auto VT : VTs)
2570 setOperationAction(Ops, VT, Action);
2571 }
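 // Illustrative sketch (hypothetical target): the ArrayRef overloads above
 // let a constructor configure whole families of nodes in one statement:
 //
 //   setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM},
 //                      {MVT::i8, MVT::i16}, Promote);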
2572
2573 /// Indicate that the specified load with extension does not work with the
2574 /// specified type and indicate what to do about it.
2575 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2576 LegalizeAction Action) {
2577 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2578 MemVT.isValid() && "Table isn't big enough!");
2579 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2580 unsigned Shift = 4 * ExtType;
2581 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2582 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2583 }
2584 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2585 LegalizeAction Action) {
2586 for (auto ExtType : ExtTypes)
2587 setLoadExtAction(ExtType, ValVT, MemVT, Action);
2588 }
2589 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2590 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2591 for (auto MemVT : MemVTs)
2592 setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2593 }
2594
2595 /// Let target indicate that an extending atomic load of the specified type
2596 /// is legal.
2597 void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2598 LegalizeAction Action) {
2599 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2600 MemVT.isValid() && "Table isn't big enough!");
2601 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2602 unsigned Shift = 4 * ExtType;
2603 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &=
2604 ~((uint16_t)0xF << Shift);
2605 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |=
2606 ((uint16_t)Action << Shift);
2607 }
2608 void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2609 LegalizeAction Action) {
2610 for (auto ExtType : ExtTypes)
2611 setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
2612 }
2613 void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2614 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2615 for (auto MemVT : MemVTs)
2616 setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2617 }
2618
2619 /// Indicate that the specified truncating store does not work with the
2620 /// specified type and indicate what to do about it.
2621 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2622 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2623 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2624 }
2625
2626 /// Indicate that the specified indexed load does or does not work with the
2627 /// specified type and indicate what to do about it.
2628 ///
2629 /// NOTE: All indexed mode loads are initialized to Expand in
2630 /// TargetLowering.cpp
2631 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2632 LegalizeAction Action) {
2633 for (auto IdxMode : IdxModes)
2634 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2635 }
2636
2637 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2638 LegalizeAction Action) {
2639 for (auto VT : VTs)
2640 setIndexedLoadAction(IdxModes, VT, Action);
2641 }
2642
2643 /// Indicate that the specified indexed store does or does not work with the
2644 /// specified type and indicate what to do about it.
2645 ///
2646 /// NOTE: All indexed mode stores are initialized to Expand in
2647 /// TargetLowering.cpp
2648 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2649 LegalizeAction Action) {
2650 for (auto IdxMode : IdxModes)
2651 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2652 }
2653
2654 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2655 LegalizeAction Action) {
2656 for (auto VT : VTs)
2657 setIndexedStoreAction(IdxModes, VT, Action);
2658 }
2659
2660 /// Indicate that the specified indexed masked load does or does not work with
2661 /// the specified type and indicate what to do about it.
2662 ///
2663 /// NOTE: All indexed mode masked loads are initialized to Expand in
2664 /// TargetLowering.cpp
2665 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2666 LegalizeAction Action) {
2667 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2668 }
2669
2670 /// Indicate that the specified indexed masked store does or does not work
2671 /// with the specified type and indicate what to do about it.
2672 ///
2673 /// NOTE: All indexed mode masked stores are initialized to Expand in
2674 /// TargetLowering.cpp
2675 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2676 LegalizeAction Action) {
2677 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2678 }
2679
2680 /// Indicate that the specified condition code is or isn't supported on the
2681 /// target and indicate what to do about it.
2682 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2683 LegalizeAction Action) {
2684 for (auto CC : CCs) {
2685 assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2686 "Table isn't big enough!");
2687 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2688 /// The lower 3 bits of the SimpleTy select the Nth 4-bit chunk within the
2689 /// 32-bit value, and the remaining upper bits index into the second
2690 /// dimension of the array to select which 32-bit value to use.
2691 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2692 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2693 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2694 }
2695 }
2696 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2697 LegalizeAction Action) {
2698 for (auto VT : VTs)
2699 setCondCodeAction(CCs, VT, Action);
2700 }
2701
2702 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2703 /// to trying a larger integer/fp until it can find one that works. If that
2704 /// default is insufficient, this method can be used by the target to override
2705 /// the default.
2706 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2707 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2708 }
2709
2710 /// Convenience method to set an operation to Promote and specify the type
2711 /// in a single call.
2712 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2713 setOperationAction(Opc, OrigVT, Promote);
2714 AddPromotedToType(Opc, OrigVT, DestVT);
2715 }
2716 void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT,
2717 MVT DestVT) {
2718 for (auto Op : Ops) {
2719 setOperationAction(Op, OrigVT, Promote);
2720 AddPromotedToType(Op, OrigVT, DestVT);
2721 }
2722 }
2723
2724 /// Targets should invoke this method for each target independent node that
2725 /// they want to provide a custom DAG combiner for by implementing the
2726 /// PerformDAGCombine virtual method.
2727 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2728 for (auto NT : NTs) {
2729 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2730 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2731 }
2732 }
2733
2734 /// Set the target's minimum function alignment.
2735 void setMinFunctionAlignment(Align Alignment) {
2736 MinFunctionAlignment = Alignment;
2737 }
2738
2739 /// Set the target's preferred function alignment. This should be set if
2740 /// there is a performance benefit to higher-than-minimum alignment.
2741 void setPrefFunctionAlignment(Align Alignment) {
2742 PrefFunctionAlignment = Alignment;
2743 }
2744
2745 /// Set the target's preferred loop alignment. A default alignment of one
2746 /// means the target does not care about loop alignment. The target may also
2747 /// override getPrefLoopAlignment to provide per-loop values.
2748 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2749 void setMaxBytesForAlignment(unsigned MaxBytes) {
2750 MaxBytesForAlignment = MaxBytes;
2751 }
2752
2753 /// Set the minimum stack alignment of an argument.
2754 void setMinStackArgumentAlignment(Align Alignment) {
2755 MinStackArgumentAlignment = Alignment;
2756 }
2757
2758 /// Set the maximum atomic operation size supported by the
2759 /// backend. Atomic operations greater than this size (as well as
2760 /// ones that are not naturally aligned), will be expanded by
2761 /// AtomicExpandPass into an __atomic_* library call.
2762 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2763 MaxAtomicSizeInBitsSupported = SizeInBits;
2764 }
2765
2766 /// Set the size in bits of the maximum div/rem the backend supports.
2767 /// Larger operations will be expanded by ExpandLargeDivRem.
2768 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2769 MaxDivRemBitWidthSupported = SizeInBits;
2770 }
2771
2772 /// Set the size in bits of the maximum fp convert the backend supports.
2773 /// Larger operations will be expanded by ExpandLargeFPConvert.
2774 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2775 MaxLargeFPConvertBitWidthSupported = SizeInBits;
2776 }
2777
2778 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2779 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2780 MinCmpXchgSizeInBits = SizeInBits;
2781 }
2782
2783 /// Sets whether unaligned atomic operations are supported.
2784 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2785 SupportsUnalignedAtomics = UnalignedSupported;
2786 }
2787
2788public:
2789 //===--------------------------------------------------------------------===//
2790 // Addressing mode description hooks (used by LSR etc).
2791 //
2792
2793 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2794 /// instructions reading the address. This allows as much computation as
2795 /// possible to be done in the address mode for that operand. This hook lets
2796 /// targets also pass back when this should be done on intrinsics which
2797 /// load/store.
2798 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2799 SmallVectorImpl<Value*> &/*Ops*/,
2800 Type *&/*AccessTy*/) const {
2801 return false;
2802 }
2803
2804 /// This represents an addressing mode of:
2805 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale
2806 /// If BaseGV is null, there is no BaseGV.
2807 /// If BaseOffs is zero, there is no base offset.
2808 /// If HasBaseReg is false, there is no base register.
2809 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2810 /// no scale.
2811 /// If ScalableOffset is zero, there is no scalable offset.
2812 struct AddrMode {
2813 GlobalValue *BaseGV = nullptr;
2814 int64_t BaseOffs = 0;
2815 bool HasBaseReg = false;
2816 int64_t Scale = 0;
2817 int64_t ScalableOffset = 0;
2818 AddrMode() = default;
2819 };
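 // Illustrative sketch (hypothetical values): an x86-style address of the
 // form [%base + 4*%index + 8] would be described to isLegalAddressingMode
 // (declared below) as:
 //
 //   TargetLowering::AddrMode AM;
 //   AM.HasBaseReg = true;  // %base
 //   AM.Scale = 4;          // 4 * %index
 //   AM.BaseOffs = 8;       // constant displacement
 //   bool OK = TLI.isLegalAddressingMode(DL, AM, Int32Ty, /*AddrSpace=*/0);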
2820
2821 /// Return true if the addressing mode represented by AM is legal for this
2822 /// target, for a load/store of the specified type.
2823 ///
2824 /// The type may be VoidTy, in which case only return true if the addressing
2825 /// mode is legal for a load/store of any legal type. TODO: Handle
2826 /// pre/postinc as well.
2827 ///
2828 /// If the address space cannot be determined, it will be -1.
2829 ///
2830 /// TODO: Remove default argument
2831 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2832 Type *Ty, unsigned AddrSpace,
2833 Instruction *I = nullptr) const;
2834
2835 /// Returns true if the target's addressing mode can target thread local
2836 /// storage (TLS).
2837 virtual bool addressingModeSupportsTLS(const GlobalValue &) const {
2838 return false;
2839 }
2840
2841 /// Return the preferred common base offset.
2842 virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
2843 int64_t MaxOffset) const {
2844 return 0;
2845 }
2846
2847 /// Return true if the specified immediate is a legal icmp immediate, that is,
2848 /// the target has icmp instructions which can compare a register against the
2849 /// immediate without having to materialize the immediate into a register.
2850 virtual bool isLegalICmpImmediate(int64_t) const {
2851 return true;
2852 }
2853
2854 /// Return true if the specified immediate is a legal add immediate, that is, the
2855 /// target has add instructions which can add a register with the immediate
2856 /// without having to materialize the immediate into a register.
2857 virtual bool isLegalAddImmediate(int64_t) const {
2858 return true;
2859 }
2860
2861 /// Return true if adding the specified scalable immediate is legal, that is
2862 /// the target has add instructions which can add a register with the
2863 /// immediate (multiplied by vscale) without having to materialize the
2864 /// immediate into a register.
2865 virtual bool isLegalAddScalableImmediate(int64_t) const { return false; }
2866
2867 /// Return true if the specified immediate is legal for the value input of a
2868 /// store instruction.
2869 virtual bool isLegalStoreImmediate(int64_t Value) const {
2870 // Default implementation assumes that at least 0 works since it is likely
2871 // that a zero register exists or a zero immediate is allowed.
2872 return Value == 0;
2873 }
2874
2875 /// Given a shuffle vector SVI representing a vector splat, return a new
2876 /// scalar type of size equal to SVI's scalar type if the new type is more
2877 /// profitable. Returns nullptr otherwise. For example under MVE float splats
2878 /// are converted to integer to prevent the need to move from SPR to GPR
2879 /// registers.
2880 virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
2881 return nullptr;
2882 }
2883
2884 /// Given a set of interconnected phis of type 'From' that are loaded/stored
2885 /// or bitcast to type 'To', return true if the set should be converted to
2886 /// 'To'.
2887 virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2888 return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2889 (To->isIntegerTy() || To->isFloatingPointTy());
2890 }
2891
2892 /// Returns true if the opcode is a commutative binary operation.
2893 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2894 // FIXME: This should get its info from the td file.
2895 switch (Opcode) {
2896 case ISD::ADD:
2897 case ISD::SMIN:
2898 case ISD::SMAX:
2899 case ISD::UMIN:
2900 case ISD::UMAX:
2901 case ISD::MUL:
2902 case ISD::MULHU:
2903 case ISD::MULHS:
2904 case ISD::SMUL_LOHI:
2905 case ISD::UMUL_LOHI:
2906 case ISD::FADD:
2907 case ISD::FMUL:
2908 case ISD::AND:
2909 case ISD::OR:
2910 case ISD::XOR:
2911 case ISD::SADDO:
2912 case ISD::UADDO:
2913 case ISD::ADDC:
2914 case ISD::ADDE:
2915 case ISD::SADDSAT:
2916 case ISD::UADDSAT:
2917 case ISD::FMINNUM:
2918 case ISD::FMAXNUM:
2919 case ISD::FMINNUM_IEEE:
2920 case ISD::FMAXNUM_IEEE:
2921 case ISD::FMINIMUM:
2922 case ISD::FMAXIMUM:
2923 case ISD::FMINIMUMNUM:
2924 case ISD::FMAXIMUMNUM:
2925 case ISD::AVGFLOORS:
2926 case ISD::AVGFLOORU:
2927 case ISD::AVGCEILS:
2928 case ISD::AVGCEILU:
2929 case ISD::ABDS:
2930 case ISD::ABDU:
2931 return true;
2932 default: return false;
2933 }
2934 }
2935
2936 /// Return true if the node is a math/logic binary operator.
2937 virtual bool isBinOp(unsigned Opcode) const {
2938 // A commutative binop must be a binop.
2939 if (isCommutativeBinOp(Opcode))
2940 return true;
2941 // These are non-commutative binops.
2942 switch (Opcode) {
2943 case ISD::SUB:
2944 case ISD::SHL:
2945 case ISD::SRL:
2946 case ISD::SRA:
2947 case ISD::ROTL:
2948 case ISD::ROTR:
2949 case ISD::SDIV:
2950 case ISD::UDIV:
2951 case ISD::SREM:
2952 case ISD::UREM:
2953 case ISD::SSUBSAT:
2954 case ISD::USUBSAT:
2955 case ISD::FSUB:
2956 case ISD::FDIV:
2957 case ISD::FREM:
2958 return true;
2959 default:
2960 return false;
2961 }
2962 }
2963
2964 /// Return true if it's free to truncate a value of type FromTy to type
2965 /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
2966 /// by referencing its sub-register AX.
2967 /// Targets must return false when FromTy <= ToTy.
2968 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2969 return false;
2970 }
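// Illustrative sketch (hypothetical target) of the x86-style behavior
// described above, where narrowing an integer is just a sub-register rename:
//   bool MyTargetLowering::isTruncateFree(Type *FromTy, Type *ToTy) const {
//     if (!FromTy->isIntegerTy() || !ToTy->isIntegerTy())
//       return false;
//     unsigned FromBits = FromTy->getPrimitiveSizeInBits().getFixedValue();
//     unsigned ToBits = ToTy->getPrimitiveSizeInBits().getFixedValue();
//     return FromBits > ToBits; // free only for genuine narrowing
//   }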
2971
2972 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2973 /// whether a call is in tail position. Typically this means that both results
2974 /// would be assigned to the same register or stack slot, but it could mean
2975 /// the target performs adequate checks of its own before proceeding with the
2976 /// tail call. Targets must return false when FromTy <= ToTy.
2977 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2978 return false;
2979 }
2980
2981 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
2982 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
2983 return isTruncateFree(getApproximateEVTForLLT(FromTy, Ctx),
2984 getApproximateEVTForLLT(ToTy, Ctx));
2985 }
2986
2987 /// Return true if truncating the specific node Val to type VT2 is free.
2988 virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
2989 // Fallback to type matching.
2990 return isTruncateFree(Val.getValueType(), VT2);
2991 }
2992
2993 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2994
2995 /// Return true if the extension represented by \p I is free.
2996 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2997 /// this method can use the context provided by \p I to decide
2998 /// whether or not \p I is free.
2999 /// This method extends the behavior of the is[Z|FP]ExtFree family.
3000 /// In other words, if is[Z|FP]ExtFree returns true, then this method
3001 /// returns true as well. The converse is not true.
3002 /// The target can perform the adequate checks by overriding isExtFreeImpl.
3003 /// \pre \p I must be a sign, zero, or fp extension.
3004 bool isExtFree(const Instruction *I) const {
3005 switch (I->getOpcode()) {
3006 case Instruction::FPExt:
3007 if (isFPExtFree(EVT::getEVT(I->getType()),
3008 EVT::getEVT(I->getOperand(0)->getType())))
3009 return true;
3010 break;
3011 case Instruction::ZExt:
3012 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
3013 return true;
3014 break;
3015 case Instruction::SExt:
3016 break;
3017 default:
3018 llvm_unreachable("Instruction is not an extension");
3019 }
3020 return isExtFreeImpl(I);
3021 }
3022
3023 /// Return true if \p Load and \p Ext can form an ExtLoad.
3024 /// For example, in AArch64
3025 /// %L = load i8, i8* %ptr
3026 /// %E = zext i8 %L to i32
3027 /// can be lowered into one load instruction
3028 /// ldrb w0, [x0]
3029 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
3030 const DataLayout &DL) const {
3031 EVT VT = getValueType(DL, Ext->getType());
3032 EVT LoadVT = getValueType(DL, Load->getType());
3033
3034 // If the load has other users and the truncate is not free, the ext
3035 // probably isn't free.
3036 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
3037 !isTruncateFree(Ext->getType(), Load->getType()))
3038 return false;
3039
3040 // Check whether the target supports casts folded into loads.
3041 unsigned LType;
3042 if (isa<ZExtInst>(Ext))
3043 LType = ISD::ZEXTLOAD;
3044 else {
3045 assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
3046 LType = ISD::SEXTLOAD;
3047 }
3048
3049 return isLoadExtLegal(LType, VT, LoadVT);
3050 }
3051
3052 /// Return true if any actual instruction that defines a value of type FromTy
3053 /// implicitly zero-extends the value to ToTy in the result register.
3054 ///
3055 /// The function should return true when it is likely that the truncate can
3056 /// be freely folded with an instruction defining a value of FromTy. If
3057 /// the defining instruction is unknown (because you're looking at a
3058 /// function argument, PHI, etc.) then the target may require an
3059 /// explicit truncate, which is not necessarily free, but this function
3060 /// does not deal with those cases.
3061 /// Targets must return false when FromTy >= ToTy.
3062 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
3063 return false;
3064 }
3065
3066 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
3067 virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
3068 return isZExtFree(getApproximateEVTForLLT(FromTy, Ctx),
3069 getApproximateEVTForLLT(ToTy, Ctx));
3070 }
3071
3072 /// Return true if zero-extending the specific node Val to type VT2 is free
3073 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
3074 /// because it's folded such as X86 zero-extending loads).
3075 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
3076 return isZExtFree(Val.getValueType(), VT2);
3077 }
3078
3079 /// Return true if sign-extension from FromTy to ToTy is cheaper than
3080 /// zero-extension.
3081 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
3082 return false;
3083 }
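// Illustrative sketch: on a 64-bit target whose 32-bit operations implicitly
// sign-extend their results (RV64-style), a plausible override is
//   bool MyTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
//     return SrcVT == MVT::i32 && DstVT == MVT::i64;
//   }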
3084
3085 /// Return true if this constant should be sign extended when promoting to
3086 /// a larger type.
3087 virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
3088
3089 /// Try to optimize extending or truncating conversion instructions (like
3090 /// zext, trunc, fptoui, uitofp) for the target.
3091 virtual bool
3092 optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
3093 const TargetTransformInfo &TTI) const {
3094 return false;
3095 }
3096
3097 /// Return true if the target supports combining two loaded values of type
3098 /// LoadedType that sit next to each other in memory into a single paired load.
3099 /// RequiredAlignment gives the minimal alignment constraints that must be met
3100 /// to be able to select this paired load.
3101 ///
3102 /// This information is *not* used to generate actual paired loads, but it is
3103 /// used to generate a sequence of loads that is easier to combine into a
3104 /// paired load.
3105 /// For instance, something like this:
3106 /// a = load i64* addr
3107 /// b = trunc i64 a to i32
3108 /// c = lshr i64 a, 32
3109 /// d = trunc i64 c to i32
3110 /// will be optimized into:
3111 /// b = load i32* addr1
3112 /// d = load i32* addr2
3113 /// Where addr1 = addr2 +/- sizeof(i32).
3114 ///
3115 /// In other words, unless the target performs a post-isel load combining,
3116 /// this information should not be provided because it will generate more
3117 /// loads.
3118 virtual bool hasPairedLoad(EVT /*LoadedType*/,
3119 Align & /*RequiredAlignment*/) const {
3120 return false;
3121 }
3122
3123 /// Return true if the target has a vector blend instruction.
3124 virtual bool hasVectorBlend() const { return false; }
3125
3126 /// Get the maximum supported factor for interleaved memory accesses.
3127 /// Default to be the minimum interleave factor: 2.
3128 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
3129
3130 /// Lower an interleaved load to target specific intrinsics. Return
3131 /// true on success.
3132 ///
3133 /// \p LI is the vector load instruction.
3134 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3135 /// \p Indices is the corresponding indices for each shufflevector.
3136 /// \p Factor is the interleave factor.
3137 virtual bool lowerInterleavedLoad(LoadInst *LI,
3138 ArrayRef<ShuffleVectorInst *> Shuffles,
3139 ArrayRef<unsigned> Indices,
3140 unsigned Factor) const {
3141 return false;
3142 }
3143
3144 /// Lower an interleaved store to target specific intrinsics. Return
3145 /// true on success.
3146 ///
3147 /// \p SI is the vector store instruction.
3148 /// \p SVI is the shufflevector to RE-interleave the stored vector.
3149 /// \p Factor is the interleave factor.
3150 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
3151 unsigned Factor) const {
3152 return false;
3153 }
3154
3155 /// Lower a deinterleave intrinsic to a target specific load intrinsic.
3156 /// Return true on success. Currently only supports
3157 /// llvm.vector.deinterleave2
3158 ///
3159 /// \p DI is the deinterleave intrinsic.
3160 /// \p LI is the accompanying load instruction
3161 /// \p DeadInsts is a reference to a vector that keeps track of dead
3162 /// instructions during transformations.
3163 virtual bool lowerDeinterleaveIntrinsicToLoad(
3164 IntrinsicInst *DI, LoadInst *LI,
3165 SmallVectorImpl<Instruction *> &DeadInsts) const {
3166 return false;
3167 }
3168
3169 /// Lower an interleave intrinsic to a target specific store intrinsic.
3170 /// Return true on success. Currently only supports
3171 /// llvm.vector.interleave2
3172 ///
3173 /// \p II is the interleave intrinsic.
3174 /// \p SI is the accompanying store instruction
3175 /// \p DeadInsts is a reference to a vector that keeps track of dead
3176 /// instructions during transformations.
3177 virtual bool lowerInterleaveIntrinsicToStore(
3178 IntrinsicInst *II, StoreInst *SI,
3179 SmallVectorImpl<Instruction *> &DeadInsts) const {
3180 return false;
3181 }
3182
3183 /// Return true if an fpext operation is free (for instance, because
3184 /// single-precision floating-point numbers are implicitly extended to
3185 /// double-precision).
3186 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
3187 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
3188 "invalid fpext types");
3189 return false;
3190 }
3191
3192 /// Return true if an fpext operation input to an \p Opcode operation is free
3193 /// (for instance, because half-precision floating-point numbers are
3194 /// implicitly extended to single precision) for an FMA instruction.
3195 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
3196 LLT DestTy, LLT SrcTy) const {
3197 return false;
3198 }
3199
3200 /// Return true if an fpext operation input to an \p Opcode operation is free
3201 /// (for instance, because half-precision floating-point numbers are
3202 /// implicitly extended to single precision) for an FMA instruction.
3203 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
3204 EVT DestVT, EVT SrcVT) const {
3205 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
3206 "invalid fpext types");
3207 return isFPExtFree(DestVT, SrcVT);
3208 }
3209
3210 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
3211 /// extend node) is profitable.
3212 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
3213
3214 /// Return true if an fneg operation is free to the point where it is never
3215 /// worthwhile to replace it with a bitwise operation.
3216 virtual bool isFNegFree(EVT VT) const {
3217 assert(VT.isFloatingPoint());
3218 return false;
3219 }
3220
3221 /// Return true if an fabs operation is free to the point where it is never
3222 /// worthwhile to replace it with a bitwise operation.
3223 virtual bool isFAbsFree(EVT VT) const {
3224 assert(VT.isFloatingPoint());
3225 return false;
3226 }
3227
3228 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3229 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3230 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3231 ///
3232 /// NOTE: This may be called before legalization on types for which FMAs are
3233 /// not legal, but should return true if those types will eventually legalize
3234 /// to types that support FMAs. After legalization, it will only be called on
3235 /// types that support FMAs (via Legal or Custom actions)
3236 ///
3237 /// Targets that care about soft float support should return false when soft
3238 /// float code is being generated (i.e. use-soft-float).
3239 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3240 EVT) const {
3241 return false;
3242 }
3243
3244 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3245 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3246 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3247 ///
3248 /// NOTE: This may be called before legalization on types for which FMAs are
3249 /// not legal, but should return true if those types will eventually legalize
3250 /// to types that support FMAs. After legalization, it will only be called on
3251 /// types that support FMAs (via Legal or Custom actions)
3252 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3253 LLT) const {
3254 return false;
3255 }
3256
3257 /// IR version
3258 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
3259 return false;
3260 }
3261
3262 /// Returns true if \p MI can be combined with another instruction to
3263 /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
3264 /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
3265 /// distributed into an fadd/fsub.
3266 virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3267 assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3268 MI.getOpcode() == TargetOpcode::G_FSUB ||
3269 MI.getOpcode() == TargetOpcode::G_FMUL) &&
3270 "unexpected node in FMAD forming combine");
3271 switch (Ty.getScalarSizeInBits()) {
3272 case 16:
3273 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3274 case 32:
3275 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3276 case 64:
3277 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3278 default:
3279 break;
3280 }
3281
3282 return false;
3283 }
3284
3285 /// Returns true if \p N can be combined with another node to form an ISD::FMAD. \p N may be an
3286 /// ISD::FADD, ISD::FSUB, or an ISD::FMUL which will be distributed into an
3287 /// fadd/fsub.
3288 virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
3289 assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3290 N->getOpcode() == ISD::FMUL) &&
3291 "unexpected node in FMAD forming combine");
3292 return isOperationLegal(ISD::FMAD, N->getValueType(0));
3293 }
3294
3295 // Return true when the decision to generate FMAs (or FMS, FMLA, etc.) rather
3296 // than FMUL and ADD is delegated to the machine combiner.
3297 virtual bool generateFMAsInMachineCombiner(EVT VT,
3298 CodeGenOptLevel OptLevel) const {
3299 return false;
3300 }
3301
3302 /// Return true if it's profitable to narrow operations of type SrcVT to
3303 /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3304 /// i32 to i16.
3305 virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const {
3306 return false;
3307 }
3308
3309 /// Return true if pulling a binary operation into a select with an identity
3310 /// constant is profitable. This is the inverse of an IR transform.
3311 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3312 virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
3313 EVT VT) const {
3314 return false;
3315 }
3316
3317 /// Return true if it is beneficial to convert a load of a constant to
3318 /// just the constant itself.
3319 /// On some targets it might be more efficient to use a combination of
3320 /// arithmetic instructions to materialize the constant instead of loading it
3321 /// from a constant pool.
3322 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
3323 Type *Ty) const {
3324 return false;
3325 }
3326
3327 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3328 /// from this source type with this index. This is needed because
3329 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3330 /// the first element, and only the target knows which lowering is cheap.
3331 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3332 unsigned Index) const {
3333 return false;
3334 }
3335
3336 /// Try to convert an extract element of a vector binary operation into an
3337 /// extract element followed by a scalar operation.
3338 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3339 return false;
3340 }
3341
3342 /// Return true if extraction of a scalar element from the given vector type
3343 /// at the given index is cheap. For example, if scalar operations occur on
3344 /// the same register file as vector operations, then an extract element may
3345 /// be a sub-register rename rather than an actual instruction.
3346 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3347 return false;
3348 }
3349
3350 /// Try to convert math with an overflow comparison into the corresponding DAG
3351 /// node operation. Targets may want to override this independently of whether
3352 /// the operation is legal/custom for the given type because it may obscure
3353 /// matching of other patterns.
3354 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3355 bool MathUsed) const {
3356 // TODO: The default logic is inherited from code in CodeGenPrepare.
3357 // The opcode should not make a difference by default?
3358 if (Opcode != ISD::UADDO)
3359 return false;
3360
3361 // Allow the transform as long as we have an integer type that is not
3362 // obviously illegal and unsupported and if the math result is used
3363 // besides the overflow check. On some targets (e.g. SPARC), it is
3364 // not profitable to form an overflow op if the math result has no
3365 // concrete users.
3366 if (VT.isVector())
3367 return false;
3368 return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3369 }
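// For example, when this hook returns true for ISD::UADDO, CodeGenPrepare
// can rewrite the IR idiom
//   %s = add i32 %a, %b
//   %ov = icmp ult i32 %s, %a
// into a call to llvm.uadd.with.overflow.i32, which selects to a single
// ISD::UADDO node producing both the sum and the overflow bit.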
3370
3371 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3372 // even if the vector itself has multiple uses.
3373 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3374 return false;
3375 }
3376
3377 // Return true if CodeGenPrepare should consider splitting large offsets of a
3378 // GEP so that the GEP fits into the addressing mode and can be sunk into the
3379 // same blocks as its users.
3380 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3381
3382 /// Return true if creating a shift of the type by the given
3383 /// amount is not profitable.
3384 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3385 return false;
3386 }
3387
3388 // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3389 // A) where y has a single bit set?
3390 virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
3391 const APInt &AndMask) const {
3392 unsigned ShCt = AndMask.getBitWidth() - 1;
3393 return !shouldAvoidTransformToShift(VT, ShCt);
3394 }
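// Worked example of the fold above: with a 32-bit x and y == (1 << 3), the
// zero-or-A select becomes branchless:
//   t = shl x, 28   ; 28 == 31 - 3, moves bit 3 into the sign bit
//   m = sra t, 31   ; all-ones if the bit was set, zero otherwise
//   r = and m, A    ; A when (x & y) != 0, else 0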
3395
3396 /// Does this target require the clearing of high-order bits in a register
3397 /// passed to the fp16 to fp conversion library function?
3398 virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3399
3400 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3401 /// from min(max(fptoi)) saturation patterns.
3402 virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3403 return isOperationLegalOrCustom(Op, VT);
3404 }
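// For example, a saturating conversion written as the min(max(fptoi)) idiom
// mentioned above,
//   %i = fptosi float %x to i32
//   %lo = call i32 @llvm.smax.i32(i32 %i, i32 -2147483648)
//   %r = call i32 @llvm.smin.i32(i32 %lo, i32 2147483647)
// can be collapsed into a single fp_to_si_sat node when this hook agrees.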
3405
3406 /// Should we expand [US]CMP nodes using two selects and two compares, or by
3407 /// doing arithmetic on boolean types
3408 virtual bool shouldExpandCmpUsingSelects(EVT VT) const { return false; }
3409
3410 /// Does this target support complex deinterleaving
3411 virtual bool isComplexDeinterleavingSupported() const { return false; }
3412
3413 /// Does this target support complex deinterleaving with the given operation
3414 /// and type
3415 virtual bool isComplexDeinterleavingOperationSupported(
3416 ComplexDeinterleavingOperation Operation, Type *Ty) const {
3417 return false;
3418 }
3419
3420 /// Create the IR node for the given complex deinterleaving operation.
3421 /// If one cannot be created using all the given inputs, nullptr should be
3422 /// returned.
3423 virtual Value *createComplexDeinterleavingIR(
3424 IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
3425 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3426 Value *Accumulator = nullptr) const {
3427 return nullptr;
3428 }
3429
3430 /// Rename the default libcall routine name for the specified libcall.
3431 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
3432 Libcalls.setLibcallName(Call, Name);
3433 }
3434
3435 void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
3436 Libcalls.setLibcallName(Calls, Name);
3437 }
3438
3439 /// Get the libcall routine name for the specified libcall.
3440 const char *getLibcallName(RTLIB::Libcall Call) const {
3441 return Libcalls.getLibcallName(Call);
3442 }
3443
3444 /// Override the default CondCode to be used to test the result of the
3445 /// comparison libcall against zero.
3446 /// FIXME: This can't be merged with 'RuntimeLibcallsInfo' because of the ISD.
3447 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
3448 CmpLibcallCCs[Call] = CC;
3449 }
3450
3451
3452 /// Get the CondCode that's to be used to test the result of the comparison
3453 /// libcall against zero.
3454 /// FIXME: This can't be merged with 'RuntimeLibcallsInfo' because of the ISD.
3455 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
3456 return CmpLibcallCCs[Call];
3457 }
3458
3459
3460 /// Set the CallingConv that should be used for the specified libcall.
3461 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
3462 Libcalls.setLibcallCallingConv(Call, CC);
3463 }
3464
3465 /// Get the CallingConv that should be used for the specified libcall.
3466 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
3467 return Libcalls.getLibcallCallingConv(Call);
3468 }
3469
3470 /// Execute target specific actions to finalize target lowering.
3471 /// This is used to set extra flags in MachineFrameInformation and to freeze
3472 /// the set of reserved registers.
3473 /// The default implementation just freezes the set of reserved registers.
3474 virtual void finalizeLowering(MachineFunction &MF) const;
3475
3476 //===----------------------------------------------------------------------===//
3477 // GlobalISel Hooks
3478 //===----------------------------------------------------------------------===//
3479 /// Check whether or not \p MI needs to be moved close to its uses.
3480 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3481
3482
3483private:
3484 const TargetMachine &TM;
3485
3486 /// Tells the code generator that the target has multiple (allocatable)
3487 /// condition registers that can be used to store the results of comparisons
3488 /// for use by selects and conditional branches. With multiple condition
3489 /// registers, the code generator will not aggressively sink comparisons into
3490 /// the blocks of their users.
3491 bool HasMultipleConditionRegisters;
3492
3493 /// Tells the code generator that the target has BitExtract instructions.
3494 /// The code generator will aggressively sink "shift"s into the blocks of
3495 /// their users if the users will generate "and" instructions which can be
3496 /// combined with "shift" to BitExtract instructions.
3497 bool HasExtractBitsInsn;
3498
3499 /// Tells the code generator to bypass slow divide or remainder
3500 /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
3501 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3502 /// div/rem when the operands are positive and less than 256.
3503 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
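// Illustrative usage (hypothetical target): a subtarget with a slow 32-bit
// divider would populate this map in its TargetLowering constructor via
//   addBypassSlowDiv(32, 8); // try an 8-bit divide when the operands fit
// The emitted code then tests the operands at run time and takes the narrow
// divide on the fast path.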
3504
3505 /// Tells the code generator that it shouldn't generate extra flow control
3506 /// instructions and should attempt to combine flow control instructions via
3507 /// predication.
3508 bool JumpIsExpensive;
3509
3510 /// Information about the contents of the high-bits in boolean values held in
3511 /// a type wider than i1. See getBooleanContents.
3512 BooleanContent BooleanContents;
3513
3514 /// Information about the contents of the high-bits in boolean values held in
3515 /// a type wider than i1. See getBooleanContents.
3516 BooleanContent BooleanFloatContents;
3517
3518 /// Information about the contents of the high-bits in boolean vector values
3519 /// when the element type is wider than i1. See getBooleanContents.
3520 BooleanContent BooleanVectorContents;
3521
3522 /// The target scheduling preference: shortest possible total cycles or lowest
3523 /// register usage.
3524 Sched::Preference SchedPreferenceInfo;
3525
3526 /// The minimum alignment that any argument on the stack needs to have.
3527 Align MinStackArgumentAlignment;
3528
3529 /// The minimum function alignment (used when optimizing for size, and to
3530 /// prevent explicitly provided alignment from leading to incorrect code).
3531 Align MinFunctionAlignment;
3532
3533 /// The preferred function alignment (used when alignment unspecified and
3534 /// optimizing for speed).
3535 Align PrefFunctionAlignment;
3536
3537 /// The preferred loop alignment (in log2, not in bytes).
3538 Align PrefLoopAlignment;
3539 /// The maximum number of bytes permitted to be emitted for alignment.
3540 unsigned MaxBytesForAlignment;
3541
3542 /// Size in bits of the maximum atomics size the backend supports.
3543 /// Accesses larger than this will be expanded by AtomicExpandPass.
3544 unsigned MaxAtomicSizeInBitsSupported;
3545
3546 /// Size in bits of the maximum div/rem size the backend supports.
3547 /// Larger operations will be expanded by ExpandLargeDivRem.
3548 unsigned MaxDivRemBitWidthSupported;
3549
3550 /// Size in bits of the largest fp convert the backend
3551 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
3552 unsigned MaxLargeFPConvertBitWidthSupported;
3553
3554 /// Size in bits of the minimum cmpxchg or ll/sc operation the
3555 /// backend supports.
3556 unsigned MinCmpXchgSizeInBits;
3557
3558 /// This indicates if the target supports unaligned atomic operations.
3559 bool SupportsUnalignedAtomics;
3560
3561 /// If set to a physical register, this specifies the register that
3562 /// llvm.stacksave/llvm.stackrestore should save and restore.
3563 Register StackPointerRegisterToSaveRestore;
3564
3565 /// This indicates the default register class to use for each ValueType the
3566 /// target supports natively.
3567 const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3568 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3569 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3570
3571 /// This indicates the "representative" register class to use for each
3572 /// ValueType the target supports natively. This information is used by the
3573 /// scheduler to track register pressure. By default, the representative
3574 /// register class is the largest legal super-reg register class of the
3575 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3576 /// representative class would be GR32.
3577 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};
3578
3579 /// This indicates the "cost" of the "representative" register class for each
3580 /// ValueType. The cost is used by the scheduler to approximate register
3581 /// pressure.
3582 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3583
3584 /// For any value types we are promoting or expanding, this contains the value
3585 /// type that we are changing to. For Expanded types, this contains one step
3586 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3587 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3588 /// the same type (e.g. i32 -> i32).
3589 MVT TransformToType[MVT::VALUETYPE_SIZE];
3590
3591 /// For each operation and each value type, keep a LegalizeAction that
3592 /// indicates how instruction selection should deal with the operation. Most
3593 /// operations are Legal (aka, supported natively by the target), but
3594 /// operations that are not should be described. Note that operations on
3595 /// non-legal value types are not described here.
3596 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3597
3598 /// For each load extension type and each value type, keep a LegalizeAction
3599 /// that indicates how instruction selection should deal with a load of a
3600 /// specific value type and extension type. Uses 4-bits to store the action
3601 /// for each of the 4 load ext types.
3602 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3603
3604 /// Similar to LoadExtActions, but for atomic loads. Only Legal or Expand
3605 /// (default) values are supported.
3606 uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3607
3608 /// For each value type pair keep a LegalizeAction that indicates whether a
3609 /// truncating store of a specific value type and truncating type is legal.
3610 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3611
3612 /// For each indexed mode and each value type, keep a quad of LegalizeAction
3613 /// that indicates how instruction selection should deal with the load /
3614 /// store / maskedload / maskedstore.
3615 ///
3616 /// The first dimension is the value_type for the reference. The second
3617 /// dimension represents the various modes for load store.
3618 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3619
3620 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3621 /// indicates how instruction selection should deal with the condition code.
3622 ///
3623 /// Because each CC action takes up 4 bits, we need to have the array size be
3624 /// large enough to fit all of the value types. This can be done by rounding
3625 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3626 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
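// Sizing note: four bits per action and 32 bits per array element means each
// element covers eight value types, hence the divide-by-8 with round-up in
// the second dimension above.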
3627
3628 ValueTypeActionImpl ValueTypeActions;
3629
3630private:
3631 /// Targets can specify ISD nodes that they would like PerformDAGCombine
3632 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3633 /// array.
3634 unsigned char
3635 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3636
3637 /// For operations that must be promoted to a specific type, this holds the
3638 /// destination type. This map should be sparse, so don't hold it as an
3639 /// array.
3640 ///
3641 /// Targets add entries to this map with AddPromotedToType(..), clients access
3642 /// this with getTypeToPromoteTo(..).
3643 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3644 PromoteToType;
3645
3646 /// The list of libcalls that the target will use.
3647 RTLIB::RuntimeLibcallsInfo Libcalls;
3648
3649 /// The ISD::CondCode that should be used to test the result of each of the
3650 /// comparison libcall against zero.
3651 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3652
3653 /// The bits of IndexedModeActions used to store the legalisation actions.
3654 /// We store the data as | ML | MS | L | S | each taking 4 bits.
3655 enum IndexedModeActionsBits {
3656 IMAB_Store = 0,
3657 IMAB_Load = 4,
3658 IMAB_MaskedStore = 8,
3659 IMAB_MaskedLoad = 12
3660 };
3661
3662 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3663 LegalizeAction Action) {
3664 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3665 (unsigned)Action < 0xf && "Table isn't big enough!");
3666 unsigned Ty = (unsigned)VT.SimpleTy;
3667 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3668 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3669 }
3670
3671 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3672 unsigned Shift) const {
3673 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3674 "Table isn't big enough!");
3675 unsigned Ty = (unsigned)VT.SimpleTy;
3676 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3677 }
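// For example, with the | ML | MS | L | S | layout described above, the
// action for a plain indexed load of a given mode lives at bits [7:4]
// (Shift == IMAB_Load) and a masked indexed load at bits [15:12]
// (Shift == IMAB_MaskedLoad); the setter clears the 4-bit field before
// or-ing in the new action, and the getter shifts and masks it back out.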
3678
3679protected:
3680 /// Return true if the extension represented by \p I is free.
3681 /// \pre \p I is a sign, zero, or fp extension and
3682 /// is[Z|FP]ExtFree of the related types is not true.
3683 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3684
3685 /// Depth that GatherAllAliases should continue looking for chain
3686 /// dependencies when trying to find a more preferable chain. As an
3687 /// approximation, this should be more than the number of consecutive stores
3688 /// expected to be merged.
3689 unsigned GatherAllAliasesMaxDepth;
3690
3691 /// \brief Specify maximum number of store instructions per memset call.
3692 ///
3693 /// When lowering \@llvm.memset this field specifies the maximum number of
3694 /// store operations that may be substituted for the call to memset. Targets
3695 /// must set this value based on the cost threshold for that target. Targets
3696 /// should assume that the memset will be done using as many of the largest
3697 /// store operations first, followed by smaller ones, if necessary, per
3698 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3699 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3700 /// store. This only applies to setting a constant array of a constant size.
3701 unsigned MaxStoresPerMemset;
3702 /// Likewise for functions with the OptSize attribute.
3703 unsigned MaxStoresPerMemsetOptSize;
3704
3705 /// \brief Specify maximum number of store instructions per memcpy call.
3706 ///
3707 /// When lowering \@llvm.memcpy this field specifies the maximum number of
3708 /// store operations that may be substituted for a call to memcpy. Targets
3709 /// must set this value based on the cost threshold for that target. Targets
3710 /// should assume that the memcpy will be done using as many of the largest
3711 /// store operations first, followed by smaller ones, if necessary, per
3712 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3713 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
3714 /// and one 1-byte store. This only applies to copying a constant array of
3715 /// constant size.
3716 unsigned MaxStoresPerMemcpy;
3717 /// Likewise for functions with the OptSize attribute.
3718 unsigned MaxStoresPerMemcpyOptSize;
3719 /// \brief Specify max number of store instructions to glue in inlined memcpy.
3720 ///
3721 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3722 /// of store instructions to keep together. This helps in pairing and
3723 /// vectorization later on.
3724 unsigned MaxGluedStoresPerMemcpy = 0;
3725
3726 /// \brief Specify maximum number of load instructions per memcmp call.
3727 ///
3728 /// When lowering \@llvm.memcmp this field specifies the maximum number of
3729 /// pairs of load operations that may be substituted for a call to memcmp.
3730 /// Targets must set this value based on the cost threshold for that target.
3731 /// Targets should assume that the memcmp will be done using as many of the
3732 /// largest load operations first, followed by smaller ones, if necessary, per
3733 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3734 /// with 32-bit alignment would result in one 4-byte load, one 2-byte load,
3735 /// and one 1-byte load. This only applies to comparing a constant array of
3736 /// constant size.
3737 unsigned MaxLoadsPerMemcmp;
3738 /// Likewise for functions with the OptSize attribute.
3739 unsigned MaxLoadsPerMemcmpOptSize;
3740
3741 /// \brief Specify maximum number of store instructions per memmove call.
3742 ///
3743 /// When lowering \@llvm.memmove this field specifies the maximum number of
3744 /// store instructions that may be substituted for a call to memmove. Targets
3745 /// must set this value based on the cost threshold for that target. Targets
3746 /// should assume that the memmove will be done using as many of the largest
3747 /// store operations first, followed by smaller ones, if necessary, per
3748 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3749 /// with 8-bit alignment would result in nine 1-byte stores. This only
3750 /// applies to copying a constant array of constant size.
3751 unsigned MaxStoresPerMemmove;
3752 /// Likewise for functions with the OptSize attribute.
3753 unsigned MaxStoresPerMemmoveOptSize;
3754
3755 /// Tells the code generator that select is more expensive than a branch if
3756 /// the branch is usually predicted right.
3757 bool PredictableSelectIsExpensive;
3758
3759 /// \see enableExtLdPromotion.
3760 bool EnableExtLdPromotion;
3761
3762 /// Return true if the value types that can be represented by the specified
3763 /// register class are all legal.
3764 bool isLegalRC(const TargetRegisterInfo &TRI,
3765 const TargetRegisterClass &RC) const;
3766
3767 /// Replace/modify any TargetFrameIndex operands with a target-dependent
3768 /// sequence of memory operands that is recognized by PrologEpilogInserter.
3769 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3770 MachineBasicBlock *MBB) const;
3771
3773};
3774
3775/// This class defines information used to lower LLVM code to legal SelectionDAG
3776/// operators that the target instruction selector can accept natively.
3777///
3778/// This class also defines callbacks that targets must implement to lower
3779/// target-specific constructs to SelectionDAG operators.
3780class TargetLowering : public TargetLoweringBase {
3781public:
3782 struct DAGCombinerInfo;
3783 struct MakeLibCallOptions;
3784
3785 TargetLowering(const TargetLowering &) = delete;
3786 TargetLowering &operator=(const TargetLowering &) = delete;
3787
3788 explicit TargetLowering(const TargetMachine &TM);
3789
3790 bool isPositionIndependent() const;
3791
3792 virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3793 FunctionLoweringInfo *FLI,
3794 UniformityInfo *UA) const {
3795 return false;
3796 }
3797
3798 // Lets the target control the following reassociation of operands: (op (op x,
3799 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3800 // default consider profitable any case where N0 has single use. This
3801 // behavior reflects the condition replaced by this target hook call in the
3802 // DAGCombiner. Any particular target can implement its own heuristic to
3803 // restrict common combiner.
3804 virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
3805 SDValue N1) const {
3806 return N0.hasOneUse();
3807 }
3808
3809 // Lets the target control the following reassociation of operands: (op (op x,
3810 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3811 // default consider profitable any case where N0 has single use. This
3812 // behavior reflects the condition replaced by this target hook call in the
3813 // combiner. Any particular target can implement its own heuristic to
3814 // restrict common combiner.
3815 virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
3816 Register N1) const {
3817 return MRI.hasOneNonDBGUse(N0);
3818 }
3819
3820 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3821 return false;
3822 }
3823
3824 /// Returns true by value, base pointer and offset pointer and addressing mode
3825 /// by reference if the node's address can be legally represented as
3826 /// a pre-indexed load / store address.
3827 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3828 SDValue &/*Offset*/,
3829 ISD::MemIndexedMode &/*AM*/,
3830 SelectionDAG &/*DAG*/) const {
3831 return false;
3832 }
3833
3834 /// Returns true by value, base pointer and offset pointer and addressing mode
3835 /// by reference if this node can be combined with a load / store to form a
3836 /// post-indexed load / store.
3837 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3838 SDValue &/*Base*/,
3839 SDValue &/*Offset*/,
3840 ISD::MemIndexedMode &/*AM*/,
3841 SelectionDAG &/*DAG*/) const {
3842 return false;
3843 }
3844
3845 /// Returns true if the specified base+offset is a legal indexed addressing
3846 /// mode for this target. \p MI is the load or store instruction that is being
3847 /// considered for transformation.
3848 virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3849 bool IsPre, MachineRegisterInfo &MRI) const {
3850 return false;
3851 }
3852
3853 /// Return the entry encoding for a jump table in the current function. The
3854 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3855 virtual unsigned getJumpTableEncoding() const;
3856
3857 virtual MVT getJumpTableRegTy(const DataLayout &DL) const {
3858 return getPointerTy(DL);
3859 }
3860
3861 virtual const MCExpr *
3862 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3863 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3864 MCContext &/*Ctx*/) const {
3865 llvm_unreachable("Need to implement this hook if target has custom JTIs");
3866 }
3867
3868 /// Returns relocation base for the given PIC jumptable.
3869 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3870 SelectionDAG &DAG) const;
3871
3872 /// This returns the relocation base for the given PIC jumptable, the same as
3873 /// getPICJumpTableRelocBase, but as an MCExpr.
3874 virtual const MCExpr *
3875 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3876 unsigned JTI, MCContext &Ctx) const;
3877
3878 /// Return true if folding a constant offset with the given GlobalAddress is
3879 /// legal. It is frequently not legal in PIC relocation models.
3880 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3881
3882 /// On x86, return true if the operand with index OpNo is a CALL or JUMP
3883 /// instruction, which can use either a memory constraint or an address
3884 /// constraint. -fasm-blocks "__asm call foo" lowers to
3885 /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
3886 ///
3887 /// This function is used by a hack to choose the address constraint,
3888 /// lowering to a direct call.
3889 virtual bool
3890 isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
3891 unsigned OpNo) const {
3892 return false;
3893 }
3894
3895 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3896 SDValue &Chain) const;
3897
3898 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3899 SDValue &NewRHS, ISD::CondCode &CCCode,
3900 const SDLoc &DL, const SDValue OldLHS,
3901 const SDValue OldRHS) const;
3902
3903 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3904 SDValue &NewRHS, ISD::CondCode &CCCode,
3905 const SDLoc &DL, const SDValue OldLHS,
3906 const SDValue OldRHS, SDValue &Chain,
3907 bool IsSignaling = false) const;
3908
3909 virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL,
3910 SDValue Chain, MachineMemOperand *MMO,
3911 SDValue &NewLoad, SDValue Ptr,
3912 SDValue PassThru, SDValue Mask) const {
3913 llvm_unreachable("Not Implemented");
3914 }
3915
3916 virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL,
3917 SDValue Chain, MachineMemOperand *MMO,
3918 SDValue Ptr, SDValue Val,
3919 SDValue Mask) const {
3920 llvm_unreachable("Not Implemented");
3921 }
3922
3923 /// Returns a pair of (return value, chain).
3924 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3925 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3926 EVT RetVT, ArrayRef<SDValue> Ops,
3927 MakeLibCallOptions CallOptions,
3928 const SDLoc &dl,
3929 SDValue Chain = SDValue()) const;
3930
3931 /// Check whether parameters to a call that are passed in callee saved
3932 /// registers are the same as from the calling function. This needs to be
3933 /// checked for tail call eligibility.
3934 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3935 const uint32_t *CallerPreservedMask,
3936 const SmallVectorImpl<CCValAssign> &ArgLocs,
3937 const SmallVectorImpl<SDValue> &OutVals) const;
3938
3939 //===--------------------------------------------------------------------===//
3940 // TargetLowering Optimization Methods
3941 //
3942
3943 /// A convenience struct that encapsulates a DAG, and two SDValues for
3944 /// returning information from TargetLowering to its clients that want to
3945 /// combine.
3946 struct TargetLoweringOpt {
3947 SelectionDAG &DAG;
3948 bool LegalTys;
3949 bool LegalOps;
3950 SDValue Old;
3951 SDValue New;
3952
3953 explicit TargetLoweringOpt(SelectionDAG &InDAG,
3954 bool LT, bool LO) :
3955 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3956
3957 bool LegalTypes() const { return LegalTys; }
3958 bool LegalOperations() const { return LegalOps; }
3959
3960 bool CombineTo(SDValue O, SDValue N) {
3961 Old = O;
3962 New = N;
3963 return true;
3964 }
3965 };
3966
3967 /// Determines the optimal series of memory ops to replace the memset / memcpy.
3968 /// Return true if the number of memory ops is below the threshold (Limit).
3969 /// Note that this is always the case when Limit is ~0.
3970 /// It returns the types of the sequence of memory ops to perform
3971 /// memset / memcpy by reference.
3972 virtual bool
3973 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3974 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3975 const AttributeList &FuncAttributes) const;
3976
3977 /// Check to see if the specified operand of the specified instruction is a
3978 /// constant integer. If so, check to see if there are any bits set in the
3979 /// constant that are not demanded. If so, shrink the constant and return
3980 /// true.
3981 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3982 const APInt &DemandedElts,
3983 TargetLoweringOpt &TLO) const;
3984
3985 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3986 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3987 TargetLoweringOpt &TLO) const;
3988
3989 // Target hook to do target-specific const optimization, which is called by
3990 // ShrinkDemandedConstant. This function should return true if the target
3991 // doesn't want ShrinkDemandedConstant to further optimize the constant.
3992 virtual bool targetShrinkDemandedConstant(SDValue Op,
3993 const APInt &DemandedBits,
3994 const APInt &DemandedElts,
3995 TargetLoweringOpt &TLO) const {
3996 return false;
3997 }
3998
3999 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
4000 /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast,
4001 /// but it could be generalized for targets with other types of implicit
4002 /// widening casts.
4003 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
4004 const APInt &DemandedBits,
4005 TargetLoweringOpt &TLO) const;
4006
4007 /// Look at Op. At this point, we know that only the DemandedBits bits of the
4008 /// result of Op are ever used downstream. If we can use this information to
4009 /// simplify Op, create a new simplified DAG node and return true, returning
4010 /// the original and new nodes in Old and New. Otherwise, analyze the
4011 /// expression and return a mask of KnownOne and KnownZero bits for the
4012 /// expression (used to simplify the caller). The KnownZero/One bits may only
4013 /// be accurate for those bits in the Demanded masks.
4014 /// \p AssumeSingleUse When this parameter is true, this function will
4015 /// attempt to simplify \p Op even if there are multiple uses.
4016 /// Callers are responsible for correctly updating the DAG based on the
4017 /// results of this function, because simply replacing TLO.Old
4018 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4019 /// has multiple uses.
4020 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4021 const APInt &DemandedElts, KnownBits &Known,
4022 TargetLoweringOpt &TLO, unsigned Depth = 0,
4023 bool AssumeSingleUse = false) const;
4024
4025 /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
4026 /// Adds Op back to the worklist upon success.
4027 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4028 KnownBits &Known, TargetLoweringOpt &TLO,
4029 unsigned Depth = 0,
4030 bool AssumeSingleUse = false) const;
4031
4032 /// Helper wrapper around SimplifyDemandedBits.
4033 /// Adds Op back to the worklist upon success.
4034 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4035 DAGCombinerInfo &DCI) const;
4036
4037 /// Helper wrapper around SimplifyDemandedBits.
4038 /// Adds Op back to the worklist upon success.
4039 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4040 const APInt &DemandedElts,
4041 DAGCombinerInfo &DCI) const;
4042
4043 /// More limited version of SimplifyDemandedBits that can be used to "look
4044 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4045 /// bitwise ops etc.
4046 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4047 const APInt &DemandedElts,
4048 SelectionDAG &DAG,
4049 unsigned Depth = 0) const;
4050
4051 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4052 /// elements.
4053 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4054 SelectionDAG &DAG,
4055 unsigned Depth = 0) const;
4056
4057 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4058 /// bits from only some vector elements.
4059 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
4060 const APInt &DemandedElts,
4061 SelectionDAG &DAG,
4062 unsigned Depth = 0) const;
4063
4064 /// Look at Vector Op. At this point, we know that only the DemandedElts
4065 /// elements of the result of Op are ever used downstream. If we can use
4066 /// this information to simplify Op, create a new simplified DAG node and
4067 /// return true, storing the original and new nodes in TLO.
4068 /// Otherwise, analyze the expression and return a mask of KnownUndef and
4069 /// KnownZero elements for the expression (used to simplify the caller).
4070 /// The KnownUndef/Zero elements may only be accurate for those bits
4071 /// in the DemandedMask.
4072 /// \p AssumeSingleUse When this parameter is true, this function will
4073 /// attempt to simplify \p Op even if there are multiple uses.
4074 /// Callers are responsible for correctly updating the DAG based on the
4075 /// results of this function, because simply replacing TLO.Old
4076 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4077 /// has multiple uses.
4078 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
4079 APInt &KnownUndef, APInt &KnownZero,
4080 TargetLoweringOpt &TLO, unsigned Depth = 0,
4081 bool AssumeSingleUse = false) const;
4082
4083 /// Helper wrapper around SimplifyDemandedVectorElts.
4084 /// Adds Op back to the worklist upon success.
4085 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
4086 DAGCombinerInfo &DCI) const;
4087
4088 /// Return true if the target supports simplifying demanded vector elements by
4089 /// converting them to undefs.
4090 virtual bool
4091 shouldSimplifyDemandedVectorElts(SDValue Op,
4092 const TargetLoweringOpt &TLO) const {
4093 return true;
4094 }
4095
4096 /// Determine which of the bits specified in Mask are known to be either zero
4097 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4098 /// argument allows us to only collect the known bits that are shared by the
4099 /// requested vector elements.
4100 virtual void computeKnownBitsForTargetNode(const SDValue Op,
4101 KnownBits &Known,
4102 const APInt &DemandedElts,
4103 const SelectionDAG &DAG,
4104 unsigned Depth = 0) const;
4105
4106 /// Determine which of the bits specified in Mask are known to be either zero
4107 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4108 /// argument allows us to only collect the known bits that are shared by the
4109 /// requested vector elements. This is for GISel.
4110 virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
4111 Register R, KnownBits &Known,
4112 const APInt &DemandedElts,
4113 const MachineRegisterInfo &MRI,
4114 unsigned Depth = 0) const;
4115
4116 /// Determine the known alignment for the pointer value \p R. This can
4117 /// typically be inferred from the number of low known 0 bits. However, for a
4118 /// pointer with a non-integral address space, the alignment value may be
4119 /// independent from the known low bits.
4120 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
4121 Register R,
4122 const MachineRegisterInfo &MRI,
4123 unsigned Depth = 0) const;
4124
4125 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
4126 /// Default implementation computes low bits based on alignment
4127 /// information. This should preserve known bits passed into it.
4128 virtual void computeKnownBitsForFrameIndex(int FIOp,
4129 KnownBits &Known,
4130 const MachineFunction &MF) const;
4131
4132 /// This method can be implemented by targets that want to expose additional
4133 /// information about sign bits to the DAG Combiner. The DemandedElts
4134 /// argument allows us to only collect the minimum sign bits that are shared
4135 /// by the requested vector elements.
4136 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
4137 const APInt &DemandedElts,
4138 const SelectionDAG &DAG,
4139 unsigned Depth = 0) const;
4140
4141 /// This method can be implemented by targets that want to expose additional
4142 /// information about sign bits to GlobalISel combiners. The DemandedElts
4143 /// argument allows us to only collect the minimum sign bits that are shared
4144 /// by the requested vector elements.
4145 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
4146 Register R,
4147 const APInt &DemandedElts,
4148 const MachineRegisterInfo &MRI,
4149 unsigned Depth = 0) const;
4150
4151 /// Attempt to simplify any target nodes based on the demanded vector
4152 /// elements, returning true on success. Otherwise, analyze the expression and
4153 /// return a mask of KnownUndef and KnownZero elements for the expression
4154 /// (used to simplify the caller). The KnownUndef/Zero elements may only be
4155 /// accurate for those bits in the DemandedMask.
4156 virtual bool SimplifyDemandedVectorEltsForTargetNode(
4157 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
4158 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
4159
4160 /// Attempt to simplify any target nodes based on the demanded bits/elts,
4161 /// returning true on success. Otherwise, analyze the
4162 /// expression and return a mask of KnownOne and KnownZero bits for the
4163 /// expression (used to simplify the caller). The KnownZero/One bits may only
4164 /// be accurate for those bits in the Demanded masks.
4165 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
4166 const APInt &DemandedBits,
4167 const APInt &DemandedElts,
4168 KnownBits &Known,
4169 TargetLoweringOpt &TLO,
4170 unsigned Depth = 0) const;
4171
4172 /// More limited version of SimplifyDemandedBits that can be used to "look
4173 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4174 /// bitwise ops etc.
4175 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
4176 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
4177 SelectionDAG &DAG, unsigned Depth) const;
4178
4179 /// Return true if this function can prove that \p Op is never poison
4180 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
4181 /// argument limits the check to the requested vector elements.
4182 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4183 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4184 bool PoisonOnly, unsigned Depth) const;
4185
4186 /// Return true if Op can create undef or poison from non-undef & non-poison
4187 /// operands. The DemandedElts argument limits the check to the requested
4188 /// vector elements.
4189 virtual bool
4190 canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
4191 const SelectionDAG &DAG, bool PoisonOnly,
4192 bool ConsiderFlags, unsigned Depth) const;
4193
4194 /// Tries to build a legal vector shuffle using the provided parameters
4195 /// or equivalent variations. The Mask argument may be modified as the
4196 /// function tries different variations.
4197 /// Returns an empty SDValue if the operation fails.
4198 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
4199 SDValue N1, MutableArrayRef<int> Mask,
4200 SelectionDAG &DAG) const;
4201
4202 /// This method returns the constant pool value that will be loaded by LD.
4203 /// NOTE: You must check for implicit extensions of the constant by LD.
4204 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
4205
4206 /// If \p SNaN is false, \returns true if \p Op is known to never be any
4207 /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a signaling
4208 /// NaN.
4209 virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
4210 const SelectionDAG &DAG,
4211 bool SNaN = false,
4212 unsigned Depth = 0) const;
4213
4214 /// Return true if vector \p Op has the same value across all \p DemandedElts,
4215 /// indicating any elements which may be undef in the output \p UndefElts.
4216 virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
4217 APInt &UndefElts,
4218 const SelectionDAG &DAG,
4219 unsigned Depth = 0) const;
4220
4221 /// Returns true if the given Opc is considered a canonical constant for the
4222 /// target, which should not be transformed back into a BUILD_VECTOR.
4224 return Op.getOpcode() == ISD::SPLAT_VECTOR ||
4225 Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
4226 }
4227
4228 struct DAGCombinerInfo {
4229 void *DC; // The DAG Combiner object.
4230 CombineLevel Level;
4231 bool CalledByLegalizer;
4232
4233 public:
4234 SelectionDAG &DAG;
4235
4236 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
4237 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
4238
4239 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
4240 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
4241 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
4242 CombineLevel getDAGCombineLevel() { return Level; }
4243 bool isCalledByLegalizer() const { return CalledByLegalizer; }
4244
4245 void AddToWorklist(SDNode *N);
4246 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
4247 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
4248 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
4249
4250 bool recursivelyDeleteUnusedNodes(SDNode *N);
4251
4252 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
4253 };
4254
4255 /// Return true if \p N is a constant or constant vector equal to the true
4256 /// value from getBooleanContents().
4257 bool isConstTrueVal(SDValue N) const;
4258
4259 /// Return true if \p N is a constant or constant vector equal to the false
4260 /// value from getBooleanContents().
4261 bool isConstFalseVal(SDValue N) const;
4262
4263 /// Return if \p N is a True value when extended to \p VT.
4264 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
4265
4266 /// Try to simplify a setcc built with the specified operands and cc. If it is
4267 /// unable to simplify it, return a null SDValue.
4268 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
4269 bool foldBooleans, DAGCombinerInfo &DCI,
4270 const SDLoc &dl) const;
4271
4272 // For targets which wrap addresses, unwrap for analysis.
4273 virtual SDValue unwrapAddress(SDValue N) const { return N; }
4274
4275 /// Returns true (and the GlobalValue and the offset) if the node is a
4276 /// GlobalAddress + offset.
4277 virtual bool
4278 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
4279
4280 /// This method will be invoked for all target nodes and for any
4281 /// target-independent nodes that the target has registered with invoke it
4282 /// for.
4283 ///
4284 /// The semantics are as follows:
4285 /// Return Value:
4286 /// SDValue.Val == 0 - No change was made
4287 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
4288 /// otherwise - N should be replaced by the returned Operand.
4289 ///
4290 /// In addition, methods provided by DAGCombinerInfo may be used to perform
4291 /// more complex transformations.
4292 ///
4293 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
4294
4295 /// Return true if it is profitable to move this shift by a constant amount
4296 /// through its operand, adjusting any immediate operands as necessary to
4297 /// preserve semantics. This transformation may not be desirable if it
4298 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
4299 /// extraction in AArch64). By default, it returns true.
4300 ///
4301 /// @param N the shift node
4302 /// @param Level the current DAGCombine legalization level.
4304 CombineLevel Level) const {
4305 SDValue ShiftLHS = N->getOperand(0);
4306 if (!ShiftLHS->hasOneUse())
4307 return false;
4308 if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
4309 !ShiftLHS.getOperand(0)->hasOneUse())
4310 return false;
4311 return true;
4312 }
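// The combine guarded by this hook reassociates shifts over binary ops,
// canonically (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2); that
// exposes the shifted constant, but, as noted above, it can also break
// patterns such as AArch64 bitfield extraction.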
4313
4314 /// GlobalISel - return true if it is profitable to move this shift by a
4315 /// constant amount through its operand, adjusting any immediate operands as
4316 /// necessary to preserve semantics. This transformation may not be desirable
4317 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4318 /// bitfield extraction in AArch64). By default, it returns true.
4319 ///
4320 /// @param MI the shift instruction
4321 /// @param IsAfterLegal true if running after legalization.
4323 bool IsAfterLegal) const {
4324 return true;
4325 }
4326
4327 /// GlobalISel - return true if it's profitable to perform the combine:
4328 /// shl ([sza]ext x), y => zext (shl x, y)
4329 virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
4330 return true;
4331 }
4332
4333 // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it's desirable to try and
4334 // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
4335 // writing this) is:
4336 // With C as a power of 2 and C != 0 and C != INT_MIN:
4337 // AddAnd:
4338 // (icmp eq A, C) | (icmp eq A, -C)
4339 // -> (icmp eq and(add(A, C), ~(C + C)), 0)
4340 // (icmp ne A, C) & (icmp ne A, -C)
4341 // -> (icmp ne and(add(A, C), ~(C + C)), 0)
4342 // ABS:
4343 // (icmp eq A, C) | (icmp eq A, -C)
4344 // -> (icmp eq Abs(A), C)
4345 // (icmp ne A, C) & (icmp ne A, -C)
4346 // -> (icmp ne Abs(A), C)
4347 //
4348 // @param LogicOp the logic op
4349 // @param SETCC0 the first of the SETCC nodes
4350 // @param SETCC1 the second of the SETCC nodes
4352 const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
4354 }
4355
4356 /// Return true if it is profitable to combine an XOR of a logical shift
4357 /// to create a logical shift of NOT. This transformation may not be desirable
4358 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4359 /// BIC on ARM/AArch64). By default, it returns true.
4360 virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
4361 return true;
4362 }
4363
4364 /// Return true if the target has native support for the specified value type
4365 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
4366 /// i16 is legal, but undesirable since i16 instruction encodings are longer
4367 /// and some i16 instructions are slow.
4368 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4369 // By default, assume all legal types are desirable.
4370 return isTypeLegal(VT);
4371 }
4372
4373 /// Return true if it is profitable for dag combiner to transform a floating
4374 /// point op of specified opcode to an equivalent op of an integer
4375 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
4376 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
4377 EVT /*VT*/) const {
4378 return false;
4379 }
4380
4381 /// This method queries the target whether it is beneficial for dag combiner to
4382 /// promote the specified node. If true, it should return the desired
4383 /// promotion type by reference.
4384 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
4385 return false;
4386 }
4387
4388 /// Return true if the target supports swifterror attribute. It optimizes
4389 /// loads and stores to reading and writing a specific register.
4390 virtual bool supportSwiftError() const {
4391 return false;
4392 }
4393
4394 /// Return true if the target supports that a subset of CSRs for the given
4395 /// machine function is handled explicitly via copies.
4396 virtual bool supportSplitCSR(MachineFunction *MF) const {
4397 return false;
4398 }
4399
4400 /// Return true if the target supports kcfi operand bundles.
4401 virtual bool supportKCFIBundles() const { return false; }
4402
4403 /// Return true if the target supports ptrauth operand bundles.
4404 virtual bool supportPtrAuthBundles() const { return false; }
4405
4406 /// Perform necessary initialization to handle a subset of CSRs explicitly
4407 /// via copies. This function is called at the beginning of instruction
4408 /// selection.