//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LegacyDivergenceAnalysis;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize    // Linearize DAG, no scheduling
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
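// As an illustrative (hypothetical) example, lowering a 16-byte non-volatile
// memcpy whose source is known to be 4-byte aligned might describe the
// operation as:
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/true,
//                          /*DstAlign=*/Align(1), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);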
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if
  /// not, what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };
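  // For example, on a target whose widest legal integer type is i32, i64 gets
  // TypeExpandInteger; under the default getPreferredVectorAction below,
  // v1i32 gets TypeScalarizeVector and the odd-width v3i32 gets
  // TypeWidenVector (widened to v4i32).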

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,    // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into loadlinked/storeconditional; used
                   // by ARM/AArch64.
    LLOnly,        // Expand the (load) instruction into just a load-linked, which has
                   // greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,   // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic,  // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
    CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                       // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node;
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {};

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults
  /// to the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted
  /// amount type. Targets should return a legal type if the input type is
  /// legal. Targets can return a type that is too small if the input type
  /// is illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, behavior depends on \p LegalTypes. If
  /// \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
  /// should be set to true for calls during type legalization and after type
  /// legalization has been completed.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote
    return TypePromoteInteger;
  }

  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations in
  /// dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in code
  /// gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x &  (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShlOfConstToAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
  virtual bool preferScalarizeSplat(unsigned Opc) const { return true; }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the bits
  /// of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
  /// for different nodes. This function returns the preference (or none) for
  /// the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }
966 /// Return pair that represents the legalization kind (first) that needs to
967 /// happen to EVT (second) in order to type-legalize it.
968 ///
969 /// First: how we should legalize values of this type, either it is already
970 /// legal (return 'Legal') or we need to promote it to a larger type (return
971 /// 'Promote'), or we need to expand it into multiple registers of smaller
972 /// integer type (return 'Expand'). 'Custom' is not an option.
973 ///
974 /// Second: for types supported by the target, this is an identity function.
975 /// For types that must be promoted to larger types, this returns the larger
976 /// type to promote to. For integer types that are larger than the largest
977 /// integer register, this contains one step in the expansion to get to the
978 /// smaller register. For illegal floating point types, this returns the
979 /// integer type to transform to.
980 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
981
982 /// Return how we should legalize values of this type, either it is already
983 /// legal (return 'Legal') or we need to promote it to a larger type (return
984 /// 'Promote'), or we need to expand it into multiple registers of smaller
985 /// integer type (return 'Expand'). 'Custom' is not an option.
987 return getTypeConversion(Context, VT).first;
988 }
990 return ValueTypeActions.getTypeAction(VT);
991 }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
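  // For example, on a target whose widest legal integer type is i64, an i256
  // value steps i256 -> i128 -> i64 here, and this returns i64.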

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned opc = 0;  // target opcode
    EVT memVT;         // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }
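  // The OpActions table is typically populated in a target's TargetLowering
  // constructor via setOperationAction; e.g. a target without a native f32
  // sine instruction would call:
  //   setOperationAction(ISD::FSIN, MVT::f32, Expand);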

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases, \p Range range of values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns preferred type for switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
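  // For instance, a switch whose cases all branch to one destination becomes
  // profitable here once it needs at least 3 case comparisons, e.g. checking
  // x == 2 || x == 5 || x == 9 with a single ((1 << x) & 0x224)-style mask
  // test.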

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
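  // Each LoadExtActions entry packs one 4-bit LegalizeAction per extension
  // kind; e.g. for ExtType == ISD::SEXTLOAD (value 2) the action is read from
  // bits [8,12) of the packed table entry.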

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }
1320 /// Return how this store with truncation should be treated: either it is
1321 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1322 /// other code sequence, or the target has a custom expander for it.
1324 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1325 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1326 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1328 "Table isn't big enough!");
1329 return TruncStoreActions[ValI][MemI];
1330 }
1331
1332 /// Return true if the specified store with truncation is legal on this
1333 /// target.
1334 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1335 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1336 }
1337
1338 /// Return true if the specified store with truncation has solution on this
1339 /// target.
1340 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1341 return isTypeLegal(ValVT) &&
1342 (getTruncStoreAction(ValVT, MemVT) == Legal ||
1343 getTruncStoreAction(ValVT, MemVT) == Custom);
1344 }
1345
1346 virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
1347 bool LegalOnly) const {
1348 if (LegalOnly)
1349 return isTruncStoreLegal(ValVT, MemVT);
1350
1351 return isTruncStoreLegalOrCustom(ValVT, MemVT);
1352 }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal on this target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Returns true if the index type for a masked gather/scatter requires
  /// extending
  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }

  // Returns true if VT is a legal index type for masked gathers/scatters
  // on this target
  virtual bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const {
    return false;
  }

  // Return true if the target supports a scatter/gather instruction with
  // indices which are scaled by the particular value. Note that all targets
  // must by definition support scale of 1.
  virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
                                            uint64_t ElemSize) const {
    // MGATHER/MSCATTER are only required to support scaling by one or by the
    // element size.
    if (Scale != ElemSize && Scale != 1)
      return false;
    return true;
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < std::size(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
        PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
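  // For example, if an operation on i16 is marked Promote and no entry was
  // added with AddPromotedToType, the loop above walks i16 -> i32 -> ... and
  // returns the first legal type for which the operation does not itself
  // promote.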

  virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                                     bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }
1513
1514 EVT getMemValueType(const DataLayout &DL, Type *Ty,
1515 bool AllowUnknown = false) const {
1516 // Lower scalar pointers to native pointer types.
1517 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
1518 return getPointerMemTy(DL, PTy->getAddressSpace());
1519 else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1520 Type *Elm = VTy->getElementType();
1521 if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
1522 EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
1523 Elm = PointerTy.getTypeForEVT(Ty->getContext());
1524 }
1525 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
1526 VTy->getElementCount());
1527 }
1528
1529 return getValueType(DL, Ty, AllowUnknown);
1530 }
1531
1532
1533 /// Return the MVT corresponding to this LLVM type. See getValueType.
1534 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1535 bool AllowUnknown = false) const {
1536 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1537 }
1538
1539 /// Return the desired alignment for ByVal or InAlloca aggregate function
1540 /// arguments in the caller parameter area. This is the actual alignment, not
1541 /// its logarithm.
1542 virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1543
1544 /// Return the type of registers that this ValueType will eventually require.
1545 virtual MVT getRegisterType(MVT VT) const {
1546 assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1547 return RegisterTypeForVT[VT.SimpleTy];
1548 }
1549
1550 /// Return the type of registers that this ValueType will eventually require.
1551 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1552 if (VT.isSimple()) {
1553 assert((unsigned)VT.getSimpleVT().SimpleTy <
1554 std::size(RegisterTypeForVT));
1555 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
1556 }
1557 if (VT.isVector()) {
1558 EVT VT1;
1559 MVT RegisterVT;
1560 unsigned NumIntermediates;
1561 (void)getVectorTypeBreakdown(Context, VT, VT1,
1562 NumIntermediates, RegisterVT);
1563 return RegisterVT;
1564 }
1565 if (VT.isInteger()) {
1566 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1567 }
1568 llvm_unreachable("Unsupported extended type!");
1569 }
1570
1571 /// Return the number of registers that this ValueType will eventually
1572 /// require.
1573 ///
1574 /// This is one for any types promoted to live in larger registers, but may be
1575 /// more than one for types (like i64) that are split into pieces. For types
1576 /// like i140, which are first promoted then expanded, it is the number of
1577 /// registers needed to hold all the bits of the original type. For an i140
1578 /// on a 32 bit machine this means 5 registers.
1579 ///
1580 /// RegisterVT may be passed as a way to override the default settings, for
1581 /// instance with i128 inline assembly operands on SystemZ.
1582 virtual unsigned
1583 getNumRegisters(LLVMContext &Context, EVT VT,
1584 std::optional<MVT> RegisterVT = std::nullopt) const {
1585 if (VT.isSimple()) {
1586 assert((unsigned)VT.getSimpleVT().SimpleTy <
1587 std::size(NumRegistersForVT));
1588 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1589 }
1590 if (VT.isVector()) {
1591 EVT VT1;
1592 MVT VT2;
1593 unsigned NumIntermediates;
1594 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1595 }
1596 if (VT.isInteger()) {
1597 unsigned BitWidth = VT.getSizeInBits();
1598 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1599 return (BitWidth + RegWidth - 1) / RegWidth;
1600 }
1601 llvm_unreachable("Unsupported extended type!");
1602 }
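Illustrative aside (not part of the original header): the integer branch above is a ceiling division, which reproduces the i140 example from the comment. A minimal stand-alone sketch:

    #include <cassert>
    // Same formula as the integer case of getNumRegisters above.
    unsigned numRegistersForInt(unsigned BitWidth, unsigned RegWidth) {
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    int main() {
      assert(numRegistersForInt(140, 32) == 5); // i140 on a 32-bit machine
      assert(numRegistersForInt(64, 32) == 2);  // i64 split into two pieces
    }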
1603
1604 /// Certain combinations of ABIs, Targets and features require that types
1605 /// are legal for some operations and not for other operations.
1606 /// For MIPS all vector types must be passed through the integer register set.
1607 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1608 CallingConv::ID CC, EVT VT) const {
1609 return getRegisterType(Context, VT);
1610 }
1611
1612 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1613 /// this occurs when a vector type is used, as vectors are passed through
1614 /// the integer register set.
1615 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1616 CallingConv::ID CC,
1617 EVT VT) const {
1618 return getNumRegisters(Context, VT);
1619 }
1620
1621 /// Certain targets have context sensitive alignment requirements, where one
1622 /// type has the alignment requirement of another type.
1623 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1624 const DataLayout &DL) const {
1625 return DL.getABITypeAlign(ArgTy);
1626 }
1627
1628 /// If true, then instruction selection should seek to shrink the FP constant
1629 /// of the specified type to a smaller type in order to save space and / or
1630 /// reduce runtime.
1631 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1632
1633 /// Return true if it is profitable to reduce a load to a smaller type.
1634 /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
1635 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1636 EVT NewVT) const {
1637 // By default, assume that it is cheaper to extract a subvector from a wide
1638 // vector load rather than creating multiple narrow vector loads.
1639 if (NewVT.isVector() && !Load->hasOneUse())
1640 return false;
1641
1642 return true;
1643 }
1644
1645 /// When splitting a value of the specified type into parts, does the Lo
1646 /// or Hi part come first? This usually follows the endianness, except
1647 /// for ppcf128, where the Hi part always comes first.
1648 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1649 return DL.isBigEndian() || VT == MVT::ppcf128;
1650 }
1651
1652 /// If true, the target has custom DAG combine transformations that it can
1653 /// perform for the specified node.
1654 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1655 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1656 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1657 }
1658
1659 unsigned getGatherAllAliasesMaxDepth() const {
1660 return GatherAllAliasesMaxDepth;
1661 }
1662
1663 /// Returns the size of the platform's va_list object.
1664 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1665 return getPointerTy(DL).getSizeInBits();
1666 }
1667
1668 /// Get maximum # of store operations permitted for llvm.memset
1669 ///
1670 /// This function returns the maximum number of store operations permitted
1671 /// to replace a call to llvm.memset. The value is set by the target at the
1672 /// performance threshold for such a replacement. If OptSize is true,
1673 /// return the limit for functions that have OptSize attribute.
1674 unsigned getMaxStoresPerMemset(bool OptSize) const {
1675 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1676 }
1677
1678 /// Get maximum # of store operations permitted for llvm.memcpy
1679 ///
1680 /// This function returns the maximum number of store operations permitted
1681 /// to replace a call to llvm.memcpy. The value is set by the target at the
1682 /// performance threshold for such a replacement. If OptSize is true,
1683 /// return the limit for functions that have OptSize attribute.
1684 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1685 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1686 }
1687
1688 /// \brief Get maximum # of store operations to be glued together
1689 ///
1690 /// This function returns the maximum number of store operations permitted
1691 /// to glue together during lowering of llvm.memcpy. The value is set by
1692 /// the target at the performance threshold for such a replacement.
1693 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1694 return MaxGluedStoresPerMemcpy;
1695 }
1696
1697 /// Get maximum # of load operations permitted for memcmp
1698 ///
1699 /// This function returns the maximum number of load operations permitted
1700 /// to replace a call to memcmp. The value is set by the target at the
1701 /// performance threshold for such a replacement. If OptSize is true,
1702 /// return the limit for functions that have OptSize attribute.
1703 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1704 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1705 }
1706
1707 /// Get maximum # of store operations permitted for llvm.memmove
1708 ///
1709 /// This function returns the maximum number of store operations permitted
1710 /// to replace a call to llvm.memmove. The value is set by the target at the
1711 /// performance threshold for such a replacement. If OptSize is true,
1712 /// return the limit for functions that have OptSize attribute.
1713 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1714 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1715 }
1716
1717 /// Determine if the target supports unaligned memory accesses.
1718 ///
1719 /// This function returns true if the target allows unaligned memory accesses
1720 /// of the specified type in the given address space. If true, it also returns
1721 /// a relative speed of the unaligned memory access in the last argument by
1722 /// reference. The higher the speed number, the faster the operation compared
1723 /// to a number returned by another such call. This is used, for example, in
1724 /// situations where an array copy/move/set is converted to a sequence of
1725 /// store operations. Its use helps to ensure that such replacements don't
1726 /// generate code that causes an alignment error (trap) on the target machine.
1727 virtual bool allowsMisalignedMemoryAccesses(
1728 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1729 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1730 unsigned * /*Fast*/ = nullptr) const {
1731 return false;
1732 }
1733
1734 /// LLT handling variant.
1735 virtual bool allowsMisalignedMemoryAccesses(
1736 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1737 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1738 unsigned * /*Fast*/ = nullptr) const {
1739 return false;
1740 }
1741
1742 /// This function returns true if the memory access is aligned or if the
1743 /// target allows this specific unaligned memory access. If the access is
1744 /// allowed, the optional final parameter returns a relative speed of the
1745 /// access (as defined by the target).
1746 bool allowsMemoryAccessForAlignment(
1747 LLVMContext &Context, const DataLayout &DL, EVT VT,
1748 unsigned AddrSpace = 0, Align Alignment = Align(1),
1749 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1750 unsigned *Fast = nullptr) const;
1751
1752 /// Return true if the memory access of this type is aligned or if the target
1753 /// allows this specific unaligned access for the given MachineMemOperand.
1754 /// If the access is allowed, the optional final parameter returns a relative
1755 /// speed of the access (as defined by the target).
1756 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1757 const DataLayout &DL, EVT VT,
1758 const MachineMemOperand &MMO,
1759 unsigned *Fast = nullptr) const;
1760
1761 /// Return true if the target supports a memory access of this type for the
1762 /// given address space and alignment. If the access is allowed, the optional
1763 /// final parameter returns the relative speed of the access (as defined by
1764 /// the target).
1765 virtual bool
1766 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1767 unsigned AddrSpace = 0, Align Alignment = Align(1),
1768 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1769 unsigned *Fast = nullptr) const;
1770
1771 /// Return true if the target supports a memory access of this type for the
1772 /// given MachineMemOperand. If the access is allowed, the optional
1773 /// final parameter returns the relative access speed (as defined by the
1774 /// target).
1775 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1776 const MachineMemOperand &MMO,
1777 unsigned *Fast = nullptr) const;
1778
1779 /// LLT handling variant.
1780 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1781 const MachineMemOperand &MMO,
1782 unsigned *Fast = nullptr) const;
1783
1784 /// Returns the target specific optimal type for load and store operations as
1785 /// a result of memset, memcpy, and memmove lowering.
1786 /// It returns EVT::Other if the type should be determined using generic
1787 /// target-independent logic.
1788 virtual EVT
1789 getOptimalMemOpType(const MemOp &Op,
1790 const AttributeList & /*FuncAttributes*/) const {
1791 return MVT::Other;
1792 }
1793
1794 /// LLT returning variant.
1795 virtual LLT
1796 getOptimalMemOpLLT(const MemOp &Op,
1797 const AttributeList & /*FuncAttributes*/) const {
1798 return LLT();
1799 }
1800
1801 /// Returns true if it's safe to use load / store of the specified type to
1802 /// expand memcpy / memset inline.
1803 ///
1804 /// This is mostly true for all types except for some special cases. For
1805 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1806 /// fstpl which also does type conversion. Note the specified type doesn't
1807 /// have to be legal as the hook is used before type legalization.
1808 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1809
1810 /// Return lower limit for number of blocks in a jump table.
1811 virtual unsigned getMinimumJumpTableEntries() const;
1812
1813 /// Return lower limit of the density in a jump table.
1814 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1815
1816 /// Return upper limit for number of entries in a jump table.
1817 /// Zero if no limit.
1818 unsigned getMaximumJumpTableSize() const;
1819
1820 virtual bool isJumpTableRelative() const;
1821
1822 /// If a physical register, this specifies the register that
1823 /// llvm.savestack/llvm.restorestack should save and restore.
1824 Register getStackPointerRegisterToSaveRestore() const {
1825 return StackPointerRegisterToSaveRestore;
1826 }
1827
1828 /// If a physical register, this returns the register that receives the
1829 /// exception address on entry to an EH pad.
1830 virtual Register
1831 getExceptionPointerRegister(const Constant *PersonalityFn) const {
1832 return Register();
1833 }
1834
1835 /// If a physical register, this returns the register that receives the
1836 /// exception typeid on entry to a landing pad.
1837 virtual Register
1838 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1839 return Register();
1840 }
1841
1842 virtual bool needsFixedCatchObjects() const {
1843 report_fatal_error("Funclet EH is not implemented for this target");
1844 }
1845
1846 /// Return the minimum stack alignment of an argument.
1847 Align getMinStackArgumentAlignment() const {
1848 return MinStackArgumentAlignment;
1849 }
1850
1851 /// Return the minimum function alignment.
1852 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1853
1854 /// Return the preferred function alignment.
1855 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1856
1857 /// Return the preferred loop alignment.
1858 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
1859
1860 /// Return the maximum amount of bytes allowed to be emitted when padding for
1861 /// alignment
1862 virtual unsigned
1863 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
1864
1865 /// Should loops be aligned even when the function is marked OptSize (but not
1866 /// MinSize).
1867 virtual bool alignLoopsWithOptSize() const { return false; }
1868
1869 /// If the target has a standard location for the stack protector guard,
1870 /// returns the address of that location. Otherwise, returns nullptr.
1871 /// DEPRECATED: please override useLoadStackGuardNode and customize
1872 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1873 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
1874
1875 /// Inserts necessary declarations for SSP (stack protection) purpose.
1876 /// Should be used only when getIRStackGuard returns nullptr.
1877 virtual void insertSSPDeclarations(Module &M) const;
1878
1879 /// Return the variable that's previously inserted by insertSSPDeclarations,
1880 /// if any, otherwise return nullptr. Should be used only when
1881 /// getIRStackGuard returns nullptr.
1882 virtual Value *getSDagStackGuard(const Module &M) const;
1883
1884 /// If this function returns true, stack protection checks should XOR the
1885 /// frame pointer (or whichever pointer is used to address locals) into the
1886 /// stack guard value before checking it. getIRStackGuard must return nullptr
1887 /// if this returns true.
1888 virtual bool useStackGuardXorFP() const { return false; }
1889
1890 /// If the target has a standard stack protection check function that
1891 /// performs validation and error handling, returns the function. Otherwise,
1892 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1893 /// Should be used only when getIRStackGuard returns nullptr.
1894 virtual Function *getSSPStackGuardCheck(const Module &M) const;
1895
1896 /// \returns true if a constant G_UBFX is legal on the target.
1897 virtual bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
1898 LLT Ty2) const {
1899 return false;
1900 }
1901
1902protected:
1903 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1904 bool UseTLS) const;
1905
1906public:
1907 /// Returns the target-specific address of the unsafe stack pointer.
1908 virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
1909
1910 /// Returns the name of the symbol used to emit stack probes or the empty
1911 /// string if not applicable.
1912 virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }
1913
1914 virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }
1915
1916 virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
1917 return "";
1918 }
1919
1920 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1921 /// are happy to sink it into basic blocks. A cast may be free, but not
1922 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
1923 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
1924
1925 /// Return true if the pointer arguments to CI should be aligned by aligning
1926 /// the object whose address is being passed. If so then MinSize is set to the
1927 /// minimum size the object must be to be aligned and PrefAlign is set to the
1928 /// preferred alignment.
1929 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1930 Align & /*PrefAlign*/) const {
1931 return false;
1932 }
1933
1934 //===--------------------------------------------------------------------===//
1935 /// \name Helpers for TargetTransformInfo implementations
1936 /// @{
1937
1938 /// Get the ISD node that corresponds to the Instruction class opcode.
1939 int InstructionOpcodeToISD(unsigned Opcode) const;
1940
1941 /// @}
1942
1943 //===--------------------------------------------------------------------===//
1944 /// \name Helpers for atomic expansion.
1945 /// @{
1946
1947 /// Returns the maximum atomic operation size (in bits) supported by
1948 /// the backend. Atomic operations greater than this size (as well
1949 /// as ones that are not naturally aligned), will be expanded by
1950 /// AtomicExpandPass into an __atomic_* library call.
1951 unsigned getMaxAtomicSizeInBitsSupported() const {
1952 return MaxAtomicSizeInBitsSupported;
1953 }
1954
1955 /// Returns the size in bits of the maximum div/rem the backend supports.
1956 /// Larger operations will be expanded by ExpandLargeDivRem.
1957 unsigned getMaxDivRemBitWidthSupported() const {
1958 return MaxDivRemBitWidthSupported;
1959 }
1960
1961 /// Returns the size in bits of the largest fp convert the backend
1962 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
1963 unsigned getMaxLargeFPConvertBitWidthSupported() const {
1964 return MaxLargeFPConvertBitWidthSupported;
1965 }
1966
1967 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1968 /// the backend supports. Any smaller operations are widened in
1969 /// AtomicExpandPass.
1970 ///
1971 /// Note that *unlike* operations above the maximum size, atomic ops
1972 /// are still natively supported below the minimum; they just
1973 /// require a more complex expansion.
1974 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1975
1976 /// Whether the target supports unaligned atomic operations.
1977 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1978
1979 /// Whether AtomicExpandPass should automatically insert fences and reduce
1980 /// ordering for this atomic. This should be true for most architectures with
1981 /// weak memory ordering. Defaults to false.
1982 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1983 return false;
1984 }
1985
1986 /// Whether AtomicExpandPass should automatically insert a trailing fence
1987 /// without reducing the ordering for this atomic. Defaults to false.
1988 virtual bool
1989 shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
1990 return false;
1991 }
1992
1993 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1994 /// corresponding pointee type. This may entail some non-trivial operations to
1995 /// truncate or reconstruct types that will be illegal in the backend. See
1996 /// ARMISelLowering for an example implementation.
1997 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
1998 Value *Addr, AtomicOrdering Ord) const {
1999 llvm_unreachable("Load linked unimplemented on this target");
2000 }
2001
2002 /// Perform a store-conditional operation to Addr. Return the status of the
2003 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2004 virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
2005 Value *Addr, AtomicOrdering Ord) const {
2006 llvm_unreachable("Store conditional unimplemented on this target");
2007 }
2008
2009 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2010 /// represents the core LL/SC loop which will be lowered at a late stage by
2011 /// the backend. The target-specific intrinsic returns the loaded value and
2012 /// is not responsible for masking and shifting the result.
2013 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
2014 AtomicRMWInst *AI,
2015 Value *AlignedAddr, Value *Incr,
2016 Value *Mask, Value *ShiftAmt,
2017 AtomicOrdering Ord) const {
2018 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2019 }
2020
2021 /// Perform an atomicrmw expansion in a target-specific way. This is
2022 /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2023 /// work, and the target supports another way to lower atomicrmw.
2024 virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2025 llvm_unreachable(
2026 "Generic atomicrmw expansion unimplemented on this target");
2027 }
2028
2029 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2030 /// represents the combined bit test intrinsic which will be lowered at a late
2031 /// stage by the backend.
2032 virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2033 llvm_unreachable(
2034 "Bit test atomicrmw expansion unimplemented on this target");
2035 }
2036
2037 /// Perform an atomicrmw whose result is only used by comparison, using a
2038 /// target-specific intrinsic. This represents the combined atomic and compare
2039 /// intrinsic which will be lowered at a late stage by the backend.
2040 virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2041 llvm_unreachable(
2042 "Compare arith atomicrmw expansion unimplemented on this target");
2043 }
2044
2045 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2046 /// represents the core LL/SC loop which will be lowered at a late stage by
2047 /// the backend. The target-specific intrinsic returns the loaded value and
2048 /// is not responsible for masking and shifting the result.
2049 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
2050 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2051 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2052 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2053 }
2054
2055 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2056 /// It is called by AtomicExpandPass before expanding an
2057 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2058 /// if shouldInsertFencesForAtomic returns true.
2059 ///
2060 /// Inst is the original atomic instruction, prior to other expansions that
2061 /// may be performed.
2062 ///
2063 /// This function should either return a nullptr, or a pointer to an IR-level
2064 /// Instruction*. Even complex fence sequences can be represented by a
2065 /// single Instruction* through an intrinsic to be lowered later.
2066 /// Backends should override this method to produce target-specific intrinsic
2067 /// for their fences.
2068 /// FIXME: Please note that the default implementation here in terms of
2069 /// IR-level fences exists for historical/compatibility reasons and is
2070 /// *unsound* ! Fences cannot, in general, be used to restore sequential
2071 /// consistency. For example, consider the following example:
2072 /// atomic<int> x = y = 0;
2073 /// int r1, r2, r3, r4;
2074 /// Thread 0:
2075 /// x.store(1);
2076 /// Thread 1:
2077 /// y.store(1);
2078 /// Thread 2:
2079 /// r1 = x.load();
2080 /// r2 = y.load();
2081 /// Thread 3:
2082 /// r3 = y.load();
2083 /// r4 = x.load();
2084 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
2085 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
2086 /// IR-level fences can prevent it.
2087 /// @{
2088 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2089 Instruction *Inst,
2090 AtomicOrdering Ord) const;
2091
2092 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2093 Instruction *Inst,
2094 AtomicOrdering Ord) const;
2095 /// @}
2096
2097 // Emits code that executes when the comparison result in the ll/sc
2098 // expansion of a cmpxchg instruction is such that the store-conditional will
2099 // not execute. This makes it possible to balance out the load-linked with
2100 // a dedicated instruction, if desired.
2101 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2102 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2103 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2104
2105 /// Returns true if arguments should be sign-extended in lib calls.
2106 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
2107 return IsSigned;
2108 }
2109
2110 /// Returns true if arguments should be extended in lib calls.
2111 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2112 return true;
2113 }
2114
2115 /// Returns how the given (atomic) load should be expanded by the
2116 /// IR-level AtomicExpand pass.
2117 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2118 return AtomicExpansionKind::None;
2119 }
2120
2121 /// Returns how the given (atomic) load should be cast by the IR-level
2122 /// AtomicExpand pass.
2123 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2124 if (LI->getType()->isFloatingPointTy())
2125 return AtomicExpansionKind::CastToInteger;
2126 return AtomicExpansionKind::None;
2127 }
2128
2129 /// Returns how the given (atomic) store should be expanded by the IR-level
2130 /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
2131 /// to use an atomicrmw xchg.
2132 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2133 return AtomicExpansionKind::None;
2134 }
2135
2136 /// Returns how the given (atomic) store should be cast by the IR-level
2137 /// AtomicExpand pass. For instance AtomicExpansionKind::CastToInteger
2138 /// will try to cast the operands to integer values.
2139 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2140 if (SI->getValueOperand()->getType()->isFloatingPointTy())
2141 return AtomicExpansionKind::CastToInteger;
2142 return AtomicExpansionKind::None;
2143 }
2144
2145 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2146 /// AtomicExpand pass.
2147 virtual AtomicExpansionKind
2148 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2149 return AtomicExpansionKind::None;
2150 }
2151
2152 /// Returns how the IR-level AtomicExpand pass should expand the given
2153 /// AtomicRMW, if at all. Default is to never expand.
2154 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2155 return RMW->isFloatingPointOperation() ?
2156 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2157 }
2158
2159 /// Returns how the given atomic atomicrmw should be cast by the IR-level
2160 /// AtomicExpand pass.
2161 virtual AtomicExpansionKind
2162 shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2163 if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2164 (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2165 RMWI->getValOperand()->getType()->isPointerTy()))
2166 return AtomicExpansionKind::CastToInteger;
2167
2168 return AtomicExpansionKind::None;
2169 }
2170
2171 /// On some platforms, an AtomicRMW that never actually modifies the value
2172 /// (such as fetch_add of 0) can be turned into a fence followed by an
2173 /// atomic load. This may sound useless, but it makes it possible for the
2174 /// processor to keep the cacheline shared, dramatically improving
2175 /// performance. And such idempotent RMWs are useful for implementing some
2176 /// kinds of locks, see for example (justification + benchmarks):
2177 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2178 /// This method tries doing that transformation, returning the atomic load if
2179 /// it succeeds, and nullptr otherwise.
2180 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2181 /// another round of expansion.
2182 virtual LoadInst *
2183 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2184 return nullptr;
2185 }
2186
2187 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2188 /// SIGN_EXTEND, or ANY_EXTEND).
2189 virtual ISD::NodeType getExtendForAtomicOps() const {
2190 return ISD::ZERO_EXTEND;
2191 }
2192
2193 /// Returns how the platform's atomic compare and swap expects its comparison
2194 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2195 /// separate from getExtendForAtomicOps, which is concerned with the
2196 /// sign-extension of the instruction's output, whereas here we are concerned
2197 /// with the sign-extension of the input. For targets with compare-and-swap
2198 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2199 /// the input can be ANY_EXTEND, but the output will still have a specific
2200 /// extension.
2201 virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2202 return ISD::ANY_EXTEND;
2203 }
2204
2205 /// @}
2206
2207 /// Returns true if we should normalize
2208 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2209 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2210 /// that it saves us from materializing N0 and N1 in an integer register.
2211 /// Targets that are able to perform and/or on flags should return false here.
2212 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2213 EVT VT) const {
2214 // If a target has multiple condition registers, then it likely has logical
2215 // operations on those registers.
2216 if (hasMultipleConditionRegisters())
2217 return false;
2218 // Only do the transform if the value won't be split into multiple
2219 // registers.
2220 LegalizeTypeAction Action = getTypeAction(Context, VT);
2221 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2222 Action != TypeSplitVector;
2223 }
2224
2225 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2226
2227 /// Return true if a select of constants (select Cond, C1, C2) should be
2228 /// transformed into simple math ops with the condition value. For example:
2229 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2230 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2231 return false;
2232 }
2233
2234 /// Return true if it is profitable to transform an integer
2235 /// multiplication-by-constant into simpler operations like shifts and adds.
2236 /// This may be true if the target does not directly support the
2237 /// multiplication operation for the specified type or the sequence of simpler
2238 /// ops is faster than the multiply.
2239 virtual bool decomposeMulByConstant(LLVMContext &Context,
2240 EVT VT, SDValue C) const {
2241 return false;
2242 }
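Illustrative aside (not part of the original header): when a target returns true here, DAGCombiner may rewrite a multiply by a constant into shifts and adds. A stand-alone sketch of one such decomposition (the helper name is hypothetical):

    #include <cassert>
    #include <cstdint>
    // x * 9 == (x << 3) + x: one shift and one add replace the multiply.
    uint32_t mulBy9(uint32_t X) { return (X << 3) + X; }
    int main() { assert(mulBy9(7) == 63); }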
2243
2244 /// Return true if it may be profitable to transform
2245 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2246 /// This may not be true if c1 and c2 can be represented as immediates but
2247 /// c1*c2 cannot, for example.
2248 /// The target should check if c1, c2 and c1*c2 can be represented as
2249 /// immediates, or have to be materialized into registers. If it is not sure
2250 /// about some cases, a default true can be returned to let the DAGCombiner
2251 /// decide.
2252 /// AddNode is (add x, c1), and ConstNode is c2.
2253 virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2254 SDValue ConstNode) const {
2255 return true;
2256 }
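Illustrative aside (not part of the original header): with c1 = 3 and c2 = 5 the transform turns (mul (add x, 3), 5) into (add (mul x, 5), 15), so it only pays off if 15 (= c1*c2) is as cheap to materialize as 3. A quick stand-alone check of the algebra:

    #include <cassert>
    int main() {
      for (int X = -4; X <= 4; ++X)
        assert((X + 3) * 5 == X * 5 + 15); // (mul (add x, c1), c2) == (add (mul x, c2), c1*c2)
    }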
2257
2258 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2259 /// conversion operations - canonicalizing the FP source value instead of
2260 /// converting all cases and then selecting based on value.
2261 /// This may be true if the target throws exceptions for out of bounds
2262 /// conversions or has fast FP CMOV.
2263 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2264 bool IsSigned) const {
2265 return false;
2266 }
2267
2268 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2269 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2270 /// considered beneficial.
2271 /// If optimizing for size, expansion is only considered beneficial for up to
2272 /// 5 multiplies and a divide (if the exponent is negative).
2273 bool isBeneficialToExpandPowI(int Exponent, bool OptForSize) const {
2274 if (Exponent < 0)
2275 Exponent = -Exponent;
2276 return !OptForSize ||
2277 (llvm::popcount((unsigned int)Exponent) + Log2_32(Exponent) < 7);
2278 }
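Illustrative aside (not part of the original header): powi expansion uses square-and-multiply, costing roughly Log2(Exponent) squarings plus popcount(Exponent) - 1 extra multiplies, so the popcount + Log2 < 7 test above caps the size-optimized expansion at about five multiplies (plus the divide for negative exponents). A stand-alone model, assuming a nonzero exponent and C++20:

    #include <bit>
    #include <cassert>
    bool beneficialToExpandPowI(int Exponent, bool OptForSize) {
      unsigned E = Exponent < 0 ? -Exponent : Exponent; // assumes Exponent != 0
      unsigned Log2 = 31 - std::countl_zero(E);         // same as llvm::Log2_32(E)
      return !OptForSize || (std::popcount(E) + Log2 < 7);
    }
    int main() {
      assert(beneficialToExpandPowI(16, /*OptForSize=*/true));   // 1 + 4 = 5: expand
      assert(!beneficialToExpandPowI(-15, /*OptForSize=*/true)); // 4 + 3 = 7: call libm
    }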
2279
2280 //===--------------------------------------------------------------------===//
2281 // TargetLowering Configuration Methods - These methods should be invoked by
2282 // the derived class constructor to configure this object for the target.
2283 //
2284protected:
2285 /// Specify how the target extends the result of integer and floating point
2286 /// boolean values from i1 to a wider type. See getBooleanContents.
2287 void setBooleanContents(BooleanContent Ty) {
2288 BooleanContents = Ty;
2289 BooleanFloatContents = Ty;
2290 }
2291
2292 /// Specify how the target extends the result of integer and floating point
2293 /// boolean values from i1 to a wider type. See getBooleanContents.
2294 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2295 BooleanContents = IntTy;
2296 BooleanFloatContents = FloatTy;
2297 }
2298
2299 /// Specify how the target extends the result of a vector boolean value from a
2300 /// vector of i1 to a wider type. See getBooleanContents.
2301 void setBooleanVectorContents(BooleanContent Ty) {
2302 BooleanVectorContents = Ty;
2303 }
2304
2305 /// Specify the target scheduling preference.
2306 void setSchedulingPreference(Sched::Preference Pref) {
2307 SchedPreferenceInfo = Pref;
2308 }
2309
2310 /// Indicate the minimum number of blocks to generate jump tables.
2311 void setMinimumJumpTableEntries(unsigned Val);
2312
2313 /// Indicate the maximum number of entries in jump tables.
2314 /// Set to zero to generate unlimited jump tables.
2315 void setMaximumJumpTableSize(unsigned);
2316
2317 /// If set to a physical register, this specifies the register that
2318 /// llvm.savestack/llvm.restorestack should save and restore.
2319 void setStackPointerRegisterToSaveRestore(Register R) {
2320 StackPointerRegisterToSaveRestore = R;
2321 }
2322
2323 /// Tells the code generator that the target has multiple (allocatable)
2324 /// condition registers that can be used to store the results of comparisons
2325 /// for use by selects and conditional branches. With multiple condition
2326 /// registers, the code generator will not aggressively sink comparisons into
2327 /// the blocks of their users.
2328 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2329 HasMultipleConditionRegisters = hasManyRegs;
2330 }
2331
2332 /// Tells the code generator that the target has BitExtract instructions.
2333 /// The code generator will aggressively sink "shift"s into the blocks of
2334 /// their users if the users will generate "and" instructions which can be
2335 /// combined with "shift" to BitExtract instructions.
2336 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2337 HasExtractBitsInsn = hasExtractInsn;
2338 }
2339
2340 /// Tells the code generator not to expand logic operations on comparison
2341 /// predicates into separate sequences that increase the amount of flow
2342 /// control.
2343 void setJumpIsExpensive(bool isExpensive = true);
2344
2345 /// Tells the code generator which bitwidths to bypass.
2346 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2347 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2348 }
2349
2350 /// Add the specified register class as an available regclass for the
2351 /// specified value type. This indicates the selector can handle values of
2352 /// that class natively.
2353 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2354 assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2355 RegClassForVT[VT.SimpleTy] = RC;
2356 }
2357
2358 /// Return the largest legal super-reg register class of the register class
2359 /// for the specified type and its associated "cost".
2360 virtual std::pair<const TargetRegisterClass *, uint8_t>
2361 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2362
2363 /// Once all of the register classes are added, this allows us to compute
2364 /// derived properties we expose.
2365 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2366
2367 /// Indicate that the specified operation does not work with the specified
2368 /// type and indicate what to do about it. Note that VT may refer to either
2369 /// the type of a result or that of an operand of Op.
2370 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2371 assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2372 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2373 }
2374 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2375 LegalizeAction Action) {
2376 for (auto Op : Ops)
2377 setOperationAction(Op, VT, Action);
2378 }
2379 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2380 LegalizeAction Action) {
2381 for (auto VT : VTs)
2382 setOperationAction(Ops, VT, Action);
2383 }
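Illustrative aside (not part of the original header): a target's TargetLowering constructor typically invokes these in bulk. A hedged sketch for a hypothetical MyTargetLowering with no native i32 division and a custom f64 FSIN lowering:

    // Inside a hypothetical MyTargetLowering constructor:
    setOperationAction(ISD::FSIN, MVT::f64, Custom);  // single-op form
    setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM},
                       MVT::i32, Expand);             // ArrayRef form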
2384
2385 /// Indicate that the specified load with extension does not work with the
2386 /// specified type and indicate what to do about it.
2387 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2388 LegalizeAction Action) {
2389 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2390 MemVT.isValid() && "Table isn't big enough!");
2391 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2392 unsigned Shift = 4 * ExtType;
2393 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2394 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2395 }
2396 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2397 LegalizeAction Action) {
2398 for (auto ExtType : ExtTypes)
2399 setLoadExtAction(ExtType, ValVT, MemVT, Action);
2400 }
2401 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2402 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2403 for (auto MemVT : MemVTs)
2404 setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2405 }
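Illustrative aside (not part of the original header): a common pattern is declaring that extending loads are not natively supported. A hedged sketch for a hypothetical target that has no floating-point extending load and handles i1 extending loads by promotion:

    // Inside a hypothetical target's constructor:
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD},
                     MVT::i32, MVT::i1, Promote);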
2406
2407 /// Indicate that the specified truncating store does not work with the
2408 /// specified type and indicate what to do about it.
2409 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2410 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2411 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2412 }
2413
2414 /// Indicate that the specified indexed load does or does not work with the
2415 /// specified type and indicate what to do about it.
2416 ///
2417 /// NOTE: All indexed mode loads are initialized to Expand in
2418 /// TargetLowering.cpp
2419 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2420 LegalizeAction Action) {
2421 for (auto IdxMode : IdxModes)
2422 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2423 }
2424
2425 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2426 LegalizeAction Action) {
2427 for (auto VT : VTs)
2428 setIndexedLoadAction(IdxModes, VT, Action);
2429 }
2430
2431 /// Indicate that the specified indexed store does or does not work with the
2432 /// specified type and indicate what to do about it.
2433 ///
2434 /// NOTE: All indexed mode stores are initialized to Expand in
2435 /// TargetLowering.cpp
2436 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2437 LegalizeAction Action) {
2438 for (auto IdxMode : IdxModes)
2439 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2440 }
2441
2442 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2443 LegalizeAction Action) {
2444 for (auto VT : VTs)
2445 setIndexedStoreAction(IdxModes, VT, Action);
2446 }
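Illustrative aside (not part of the original header): because every indexed mode starts out as Expand, targets with pre/post-increment addressing opt in explicitly. A hedged sketch for a hypothetical target:

    // Inside a hypothetical target's constructor:
    setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);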
2447
2448 /// Indicate that the specified indexed masked load does or does not work with
2449 /// the specified type and indicate what to do about it.
2450 ///
2451 /// NOTE: All indexed mode masked loads are initialized to Expand in
2452 /// TargetLowering.cpp
2453 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2454 LegalizeAction Action) {
2455 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2456 }
2457
2458 /// Indicate that the specified indexed masked store does or does not work
2459 /// with the specified type and indicate what to do about it.
2460 ///
2461 /// NOTE: All indexed mode masked stores are initialized to Expand in
2462 /// TargetLowering.cpp
2463 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2464 LegalizeAction Action) {
2465 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2466 }
2467
2468 /// Indicate that the specified condition code is or isn't supported on the
2469 /// target and indicate what to do about it.
2470 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2471 LegalizeAction Action) {
2472 for (auto CC : CCs) {
2473 assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2474 "Table isn't big enough!");
2475 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2476 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the
2477 /// 32-bit value and the upper 29 bits index into the second dimension of
2478 /// the array to select what 32-bit value to use.
2479 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2480 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2481 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2482 }
2483 }
2484 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2485 LegalizeAction Action) {
2486 for (auto VT : VTs)
2487 setCondCodeAction(CCs, VT, Action);
2488 }
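Illustrative aside (not part of the original header): the shift arithmetic above packs one 4-bit action per (condition code, type) pair, eight types per uint32_t, and getCondCodeAction unpacks it the same way. A hedged usage sketch for a hypothetical target without native unordered FP compares:

    // Inside a hypothetical target's constructor:
    setCondCodeAction({ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE},
                      MVT::f32, Expand);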
2489
2490 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2491 /// to trying a larger integer/fp until it can find one that works. If that
2492 /// default is insufficient, this method can be used by the target to override
2493 /// the default.
2494 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2495 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2496 }
2497
2498 /// Convenience method to set an operation to Promote and specify the type
2499 /// in a single call.
2500 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2501 setOperationAction(Opc, OrigVT, Promote);
2502 AddPromotedToType(Opc, OrigVT, DestVT);
2503 }
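Illustrative aside (not part of the original header): a typical use is routing a narrow operation through a wider legal one. A hedged sketch for a hypothetical target with no i8 CTLZ but a fast i32 one:

    // Inside a hypothetical target's constructor: run i8 CTLZ as i32 CTLZ.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    // Equivalent two-call spelling:
    //   setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    //   AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);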
2504
2505 /// Targets should invoke this method for each target independent node that
2506 /// they want to provide a custom DAG combiner for by implementing the
2507 /// PerformDAGCombine virtual method.
2508 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2509 for (auto NT : NTs) {
2510 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2511 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2512 }
2513 }
2514
2515 /// Set the target's minimum function alignment.
2516 void setMinFunctionAlignment(Align Alignment) {
2517 MinFunctionAlignment = Alignment;
2518 }
2519
2520 /// Set the target's preferred function alignment. This should be set if
2521 /// there is a performance benefit to higher-than-minimum alignment.
2522 void setPrefFunctionAlignment(Align Alignment) {
2523 PrefFunctionAlignment = Alignment;
2524 }
2525
2526 /// Set the target's preferred loop alignment. The default alignment is one,
2527 /// meaning the target does not care about loop alignment. The target may also
2528 /// override getPrefLoopAlignment to provide per-loop values.
2529 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2530 void setMaxBytesForAlignment(unsigned MaxBytes) {
2531 MaxBytesForAlignment = MaxBytes;
2532 }
2533
2534 /// Set the minimum stack alignment of an argument.
2535 void setMinStackArgumentAlignment(Align Alignment) {
2536 MinStackArgumentAlignment = Alignment;
2537 }
2538
2539 /// Set the maximum atomic operation size supported by the
2540 /// backend. Atomic operations greater than this size (as well as
2541 /// ones that are not naturally aligned), will be expanded by
2542 /// AtomicExpandPass into an __atomic_* library call.
2543 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2544 MaxAtomicSizeInBitsSupported = SizeInBits;
2545 }
2546
2547 /// Set the size in bits of the maximum div/rem the backend supports.
2548 /// Larger operations will be expanded by ExpandLargeDivRem.
2549 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2550 MaxDivRemBitWidthSupported = SizeInBits;
2551 }
2552
2553 /// Set the size in bits of the maximum fp convert the backend supports.
2554 /// Larger operations will be expanded by ExpandLargeFPConvert.
2555 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2556 MaxLargeFPConvertBitWidthSupported = SizeInBits;
2557 }
2558
2559 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2560 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2561 MinCmpXchgSizeInBits = SizeInBits;
2562 }
2563
2564 /// Sets whether unaligned atomic operations are supported.
2565 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2566 SupportsUnalignedAtomics = UnalignedSupported;
2567 }
2568
2569public:
2570 //===--------------------------------------------------------------------===//
2571 // Addressing mode description hooks (used by LSR etc).
2572 //
2573
2574 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2575 /// instructions reading the address. This allows as much computation as
2576 /// possible to be done in the address mode for that operand. This hook lets
2577 /// targets also pass back when this should be done on intrinsics which
2578 /// load/store.
2579 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2580 SmallVectorImpl<Value*> &/*Ops*/,
2581 Type *&/*AccessTy*/) const {
2582 return false;
2583 }
2584
2585 /// This represents an addressing mode of:
2586 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2587 /// If BaseGV is null, there is no BaseGV.
2588 /// If BaseOffs is zero, there is no base offset.
2589 /// If HasBaseReg is false, there is no base register.
2590 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2591 /// no scale.
2592 struct AddrMode {
2593 GlobalValue *BaseGV = nullptr;
2594 int64_t BaseOffs = 0;
2595 bool HasBaseReg = false;
2596 int64_t Scale = 0;
2597 AddrMode() = default;
2598 };
2599
2600 /// Return true if the addressing mode represented by AM is legal for this
2601 /// target, for a load/store of the specified type.
2602 ///
2603 /// The type may be VoidTy, in which case only return true if the addressing
2604 /// mode is legal for a load/store of any legal type. TODO: Handle
2605 /// pre/postinc as well.
2606 ///
2607 /// If the address space cannot be determined, it will be -1.
2608 ///
2609 /// TODO: Remove default argument
2610 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2611 Type *Ty, unsigned AddrSpace,
2612 Instruction *I = nullptr) const;
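Illustrative aside (not part of the original header): passes such as LSR fill in an AddrMode and query this hook. A hedged sketch asking whether [base + 4*index + 16] is legal for an i32 access; TLI, DL, and Int32Ty are assumed to be in scope:

    TargetLowering::AddrMode AM; // BaseGV stays null: no global base
    AM.BaseOffs = 16;            // constant displacement
    AM.HasBaseReg = true;        // a base register is present
    AM.Scale = 4;                // scaled index register
    bool Legal = TLI.isLegalAddressingMode(DL, AM, Int32Ty, /*AddrSpace=*/0);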
2613
2614 /// Return true if the specified immediate is a legal icmp immediate, that
2615 /// is, the target has icmp instructions which can compare a register against
2616 /// the immediate without having to materialize the immediate into a register.
2617 virtual bool isLegalICmpImmediate(int64_t) const {
2618 return true;
2619 }
2620
2621 /// Return true if the specified immediate is a legal add immediate, that
2622 /// is, the target has add instructions which can add a register with the
2623 /// immediate without having to materialize the immediate into a register.
2624 virtual bool isLegalAddImmediate(int64_t) const {
2625 return true;
2626 }
2627
2628 /// Return true if the specified immediate is legal for the value input of a
2629 /// store instruction.
2630 virtual bool isLegalStoreImmediate(int64_t Value) const {
2631 // Default implementation assumes that at least 0 works since it is likely
2632 // that a zero register exists or a zero immediate is allowed.
2633 return Value == 0;
2634 }
2635
2636 /// Return true if it's significantly cheaper to shift a vector by a uniform
2637 /// scalar than by an amount which will vary across each lane. On x86 before
2638 /// AVX2 for example, there is a "psllw" instruction for the former case, but
2639 /// no simple instruction for a general "a << b" operation on vectors.
2640 /// This should also apply to lowering for vector funnel shifts (rotates).
2641 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2642 return false;
2643 }
2644
2645 /// Given a shuffle vector SVI representing a vector splat, return a new
2646 /// scalar type of size equal to SVI's scalar type if the new type is more
2647 /// profitable. Returns nullptr otherwise. For example under MVE float splats
2648 /// are converted to integer to prevent the need to move from SPR to GPR
2649 /// registers.
2650 virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const {
2651 return nullptr;
2652 }
2653
2654 /// Given a set in interconnected phis of type 'From' that are loaded/stored
2655 /// or bitcast to type 'To', return true if the set should be converted to
2656 /// 'To'.
2657 virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2658 return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2659 (To->isIntegerTy() || To->isFloatingPointTy());
2660 }
2661
2662 /// Returns true if the opcode is a commutative binary operation.
2663 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2664 // FIXME: This should get its info from the td file.
2665 switch (Opcode) {
2666 case ISD::ADD:
2667 case ISD::SMIN:
2668 case ISD::SMAX:
2669 case ISD::UMIN:
2670 case ISD::UMAX:
2671 case ISD::MUL:
2672 case ISD::MULHU:
2673 case ISD::MULHS:
2674 case ISD::SMUL_LOHI:
2675 case ISD::UMUL_LOHI:
2676 case ISD::FADD:
2677 case ISD::FMUL:
2678 case ISD::AND:
2679 case ISD::OR:
2680 case ISD::XOR:
2681 case ISD::SADDO:
2682 case ISD::UADDO:
2683 case ISD::ADDC:
2684 case ISD::ADDE:
2685 case ISD::SADDSAT:
2686 case ISD::UADDSAT:
2687 case ISD::FMINNUM:
2688 case ISD::FMAXNUM:
2689 case ISD::FMINNUM_IEEE:
2690 case ISD::FMAXNUM_IEEE:
2691 case ISD::FMINIMUM:
2692 case ISD::FMAXIMUM:
2693 case ISD::AVGFLOORS:
2694 case ISD::AVGFLOORU:
2695 case ISD::AVGCEILS:
2696 case ISD::AVGCEILU:
2697 return true;
2698 default: return false;
2699 }
2700 }
2701
2702 /// Return true if the node is a math/logic binary operator.
2703 virtual bool isBinOp(unsigned Opcode) const {
2704 // A commutative binop must be a binop.
2705 if (isCommutativeBinOp(Opcode))
2706 return true;
2707 // These are non-commutative binops.
2708 switch (Opcode) {
2709 case ISD::SUB:
2710 case ISD::SHL:
2711 case ISD::SRL:
2712 case ISD::SRA:
2713 case ISD::ROTL:
2714 case ISD::ROTR:
2715 case ISD::SDIV:
2716 case ISD::UDIV:
2717 case ISD::SREM:
2718 case ISD::UREM:
2719 case ISD::SSUBSAT:
2720 case ISD::USUBSAT:
2721 case ISD::FSUB:
2722 case ISD::FDIV:
2723 case ISD::FREM:
2724 return true;
2725 default:
2726 return false;
2727 }
2728 }
2729
2730 /// Return true if it's free to truncate a value of type FromTy to type
2731 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
2732 /// by referencing its sub-register AX.
2733 /// Targets must return false when FromTy <= ToTy.
2734 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2735 return false;
2736 }
2737
2738 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2739 /// whether a call is in tail position. Typically this means that both results
2740 /// would be assigned to the same register or stack slot, but it could mean
2741 /// the target performs adequate checks of its own before proceeding with the
2742 /// tail call. Targets must return false when FromTy <= ToTy.
2743 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2744 return false;
2745 }
2746
2747 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
2748 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2749 LLVMContext &Ctx) const {
2750 return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2751 getApproximateEVTForLLT(ToTy, DL, Ctx));
2752 }
2753
2754 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2755
2756 /// Return true if the extension represented by \p I is free.
2757 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2758 /// this method can use the context provided by \p I to decide
2759 /// whether or not \p I is free.
2760 /// This method extends the behavior of the is[Z|FP]ExtFree family.
2761 /// In other words, if is[Z|FP]ExtFree returns true, then this method
2762 /// returns true as well. The converse is not true.
2763 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2764 /// \pre \p I must be a sign, zero, or fp extension.
2765 bool isExtFree(const Instruction *I) const {
2766 switch (I->getOpcode()) {
2767 case Instruction::FPExt:
2768 if (isFPExtFree(EVT::getEVT(I->getType()),
2769 EVT::getEVT(I->getOperand(0)->getType())))
2770 return true;
2771 break;
2772 case Instruction::ZExt:
2773 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2774 return true;
2775 break;
2776 case Instruction::SExt:
2777 break;
2778 default:
2779 llvm_unreachable("Instruction is not an extension");
2780 }
2781 return isExtFreeImpl(I);
2782 }
2783
2784 /// Return true if \p Load and \p Ext can form an ExtLoad.
2785 /// For example, in AArch64
2786 /// %L = load i8, i8* %ptr
2787 /// %E = zext i8 %L to i32
2788 /// can be lowered into one load instruction
2789 /// ldrb w0, [x0]
2790 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2791 const DataLayout &DL) const {
2792 EVT VT = getValueType(DL, Ext->getType());
2793 EVT LoadVT = getValueType(DL, Load->getType());
2794
2795 // If the load has other users and the truncate is not free, the ext
2796 // probably isn't free.
2797 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2798 !isTruncateFree(Ext->getType(), Load->getType()))
2799 return false;
2800
2801 // Check whether the target supports casts folded into loads.
2802 unsigned LType;
2803 if (isa<ZExtInst>(Ext))
2804 LType = ISD::ZEXTLOAD;
2805 else {
2806 assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2807 LType = ISD::SEXTLOAD;
2808 }
2809
2810 return isLoadExtLegal(LType, VT, LoadVT);
2811 }
2812
2813 /// Return true if any actual instruction that defines a value of type FromTy
2814 /// implicitly zero-extends the value to ToTy in the result register.
2815 ///
2816 /// The function should return true when it is likely that the truncate can
2817 /// be freely folded with an instruction defining a value of FromTy. If
2818 /// the defining instruction is unknown (because you're looking at a
2819 /// function argument, PHI, etc.) then the target may require an
2820 /// explicit truncate, which is not necessarily free, but this function
2821 /// does not deal with those cases.
2822 /// Targets must return false when FromTy >= ToTy.
2823 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2824 return false;
2825 }
2826
2827 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
2828 virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2829 LLVMContext &Ctx) const {
2830 return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2831 getApproximateEVTForLLT(ToTy, DL, Ctx));
2832 }
2833
2834 /// Return true if sign-extension from FromTy to ToTy is cheaper than
2835 /// zero-extension.
2836 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2837 return false;
2838 }
2839
2840 /// Return true if this constant should be sign extended when promoting to
2841 /// a larger type.
2842 virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
2843
2844 /// Return true if sinking I's operands to the same basic block as I is
2845 /// profitable, e.g. because the operands can be folded into a target
2846 /// instruction during instruction selection. After calling the function
2847 /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2848 /// come first).
2849 virtual bool shouldSinkOperands(Instruction *I,
2850 SmallVectorImpl<Use *> &Ops) const {
2851 return false;
2852 }
2853
2854 /// Try to optimize extending or truncating conversion instructions (like
2855 /// zext, trunc, fptoui, uitofp) for the target.
2856 virtual bool optimizeExtendOrTruncateConversion(Instruction *I,
2857 Loop *L) const {
2858 return false;
2859 }
2860
2861 /// Return true if the target supplies and combines to a paired load
2862 /// two loaded values of type LoadedType next to each other in memory.
2863 /// RequiredAlignment gives the minimal alignment constraints that must be met
2864 /// to be able to select this paired load.
2865 ///
2866 /// This information is *not* used to generate actual paired loads, but it is
2867 /// used to generate a sequence of loads that is easier to combine into a
2868 /// paired load.
2869 /// For instance, something like this:
2870 /// a = load i64* addr
2871 /// b = trunc i64 a to i32
2872 /// c = lshr i64 a, 32
2873 /// d = trunc i64 c to i32
2874 /// will be optimized into:
2875 /// b = load i32* addr1
2876 /// d = load i32* addr2
2877 /// Where addr1 = addr2 +/- sizeof(i32).
2878 ///
2879 /// In other words, unless the target performs a post-isel load combining,
2880 /// this information should not be provided because it will generate more
2881 /// loads.
2882 virtual bool hasPairedLoad(EVT /*LoadedType*/,
2883 Align & /*RequiredAlignment*/) const {
2884 return false;
2885 }
2886
2887 /// Return true if the target has a vector blend instruction.
2888 virtual bool hasVectorBlend() const { return false; }
2889
2890 /// Get the maximum supported factor for interleaved memory accesses.
2891 /// Default to be the minimum interleave factor: 2.
2892 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2893
2894 /// Lower an interleaved load to target specific intrinsics. Return
2895 /// true on success.
2896 ///
2897 /// \p LI is the vector load instruction.
2898 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2899 /// \p Indices is the corresponding indices for each shufflevector.
2900 /// \p Factor is the interleave factor.
2901 virtual bool lowerInterleavedLoad(LoadInst *LI,
2902 ArrayRef<ShuffleVectorInst *> Shuffles,
2903 ArrayRef<unsigned> Indices,
2904 unsigned Factor) const {
2905 return false;
2906 }
2907
2908 /// Lower an interleaved store to target specific intrinsics. Return
2909 /// true on success.
2910 ///
2911 /// \p SI is the vector store instruction.
2912 /// \p SVI is the shufflevector to RE-interleave the stored vector.
2913 /// \p Factor is the interleave factor.
2914 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2915 unsigned Factor) const {
2916 return false;
2917 }
2918
2919 /// Return true if zero-extending the specific node Val to type VT2 is free
2920 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2921 /// because it's folded such as X86 zero-extending loads).
2922 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2923 return isZExtFree(Val.getValueType(), VT2);
2924 }
2925
2926 /// Return true if an fpext operation is free (for instance, because
2927 /// single-precision floating-point numbers are implicitly extended to
2928 /// double-precision).
2929 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2930 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2931 "invalid fpext types");
2932 return false;
2933 }
2934
2935 /// Return true if an fpext operation input to an \p Opcode operation is free
2936 /// (for instance, because half-precision floating-point numbers are
2937 /// implicitly extended to single precision) for an FMA instruction.
2938 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
2939 LLT DestTy, LLT SrcTy) const {
2940 return false;
2941 }
2942
2943 /// Return true if an fpext operation input to an \p Opcode operation is free
2944 /// (for instance, because half-precision floating-point numbers are
2945 /// implicitly extended to single precision) for an FMA instruction.
2946 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2947 EVT DestVT, EVT SrcVT) const {
2948 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2949 "invalid fpext types");
2950 return isFPExtFree(DestVT, SrcVT);
2951 }
2952
2953 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2954 /// extend node) is profitable.
2955 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2956
2957 /// Return true if an fneg operation is free to the point where it is never
2958 /// worthwhile to replace it with a bitwise operation.
2959 virtual bool isFNegFree(EVT VT) const {
2960 assert(VT.isFloatingPoint());
2961 return false;
2962 }
2963
2964 /// Return true if an fabs operation is free to the point where it is never
2965 /// worthwhile to replace it with a bitwise operation.
2966 virtual bool isFAbsFree(EVT VT) const {
2967 assert(VT.isFloatingPoint());
2968 return false;
2969 }
2970
2971 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2972 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2973 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2974 ///
2975 /// NOTE: This may be called before legalization on types for which FMAs are
2976 /// not legal, but should return true if those types will eventually legalize
2977 /// to types that support FMAs. After legalization, it will only be called on
2978 /// types that support FMAs (via Legal or Custom actions)
2979 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2980 EVT) const {
2981 return false;
2982 }
2983
2984 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2985 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2986 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2987 ///
2988 /// NOTE: This may be called before legalization on types for which FMAs are
2989 /// not legal, but should return true if those types will eventually legalize
2990 /// to types that support FMAs. After legalization, it will only be called on
2991 /// types that support FMAs (via Legal or Custom actions)
2992 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2993 LLT) const {
2994 return false;
2995 }
2996
2997 /// IR version
2998 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
2999 return false;
3000 }
3001
3002 /// Returns true if \p MI can be combined with another instruction to
3003 /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
3004 /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
3005 /// distributed into an fadd/fsub.
3006 virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3007 assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3008 MI.getOpcode() == TargetOpcode::G_FSUB ||
3009 MI.getOpcode() == TargetOpcode::G_FMUL) &&
3010 "unexpected node in FMAD forming combine");
3011 switch (Ty.getScalarSizeInBits()) {
3012 case 16:
3013 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3014 case 32:
3015 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3016 case 64:
3017 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3018 default:
3019 break;
3020 }
3021
3022 return false;
3023 }
3024
3025 /// Returns true if \p N can be combined with another node to form an
3026 /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
3027 /// will be distributed into an fadd/fsub.
3028 virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
3029 assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3030 N->getOpcode() == ISD::FMUL) &&
3031 "unexpected node in FMAD forming combine");
3032 return isOperationLegal(ISD::FMAD, N->getValueType(0));
3033 }
3034
3035 // Return true when the decision to generate FMAs (or FMS, FMLA etc) rather
3036 // than FMUL and ADD is delegated to the machine combiner.
3037 virtual bool generateFMAsInMachineCombiner(EVT VT,
3038 CodeGenOpt::Level OptLevel) const {
3039 return false;
3040 }
3041
3042 /// Return true if it's profitable to narrow operations of type VT1 to
3043 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3044 /// i32 to i16.
3045 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
3046 return false;
3047 }
3048
3049 /// Return true if pulling a binary operation into a select with an identity
3050 /// constant is profitable. This is the inverse of an IR transform.
3051 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3052 virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
3053 EVT VT) const {
3054 return false;
3055 }
3056
3057 /// Return true if it is beneficial to convert a load of a constant to
3058 /// just the constant itself.
3059 /// On some targets it might be more efficient to use a combination of
3060 /// arithmetic instructions to materialize the constant instead of loading it
3061 /// from a constant pool.
3062 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
3063 Type *Ty) const {
3064 return false;
3065 }
3066
3067 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3068 /// from this source type with this index. This is needed because
3069 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3070 /// the first element, and only the target knows which lowering is cheap.
3071 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3072 unsigned Index) const {
3073 return false;
3074 }
3075
3076 /// Try to convert an extract element of a vector binary operation into an
3077 /// extract element followed by a scalar operation.
3078 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3079 return false;
3080 }
3081
3082 /// Return true if extraction of a scalar element from the given vector type
3083 /// at the given index is cheap. For example, if scalar operations occur on
3084 /// the same register file as vector operations, then an extract element may
3085 /// be a sub-register rename rather than an actual instruction.
3086 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3087 return false;
3088 }
3089
3090 /// Try to convert math with an overflow comparison into the corresponding DAG
3091 /// node operation. Targets may want to override this independently of whether
3092 /// the operation is legal/custom for the given type because it may obscure
3093 /// matching of other patterns.
3094 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3095 bool MathUsed) const {
3096 // TODO: The default logic is inherited from code in CodeGenPrepare.
3097 // The opcode should not make a difference by default?
3098 if (Opcode != ISD::UADDO)
3099 return false;
3100
3101 // Allow the transform as long as we have an integer type that is not
3102 // obviously illegal or unsupported and if the math result is used
3103 // besides the overflow check. On some targets (e.g. SPARC), it is
3104 // not profitable to form an overflow op if the math result has no
3105 // concrete users.
3106 if (VT.isVector())
3107 return false;
3108 return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3109 }
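// Illustration of the transform this hook guards: CodeGenPrepare rewrites
//
//   %add = add i32 %x, %y
//   %cmp = icmp ult i32 %add, %x ; overflow check
//
// into a call to llvm.uadd.with.overflow.i32, which selects to ISD::UADDO
// and yields both the sum and the overflow bit in a single operation.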
3110
3111 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3112 // even if the vector itself has multiple uses.
3113 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3114 return false;
3115 }
3116
3117 // Return true if CodeGenPrepare should consider splitting large offsets of
3118 // GEPs to make the GEP fit into the addressing mode and be sunk into the
3119 // same blocks as its users.
3120 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3121
3122 /// Return true if creating a shift of the type by the given
3123 /// amount is not profitable.
3124 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3125 return false;
3126 }
3127
3128 /// Does this target require the clearing of high-order bits in a register
3129 /// passed to the fp16 to fp conversion library function?
3130 virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3131
3132 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3133 /// from min(max(fptoi)) saturation patterns.
3134 virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3135 return isOperationLegalOrCustom(Op, VT);
3136 }
3137
3138 /// Does this target support complex deinterleaving?
3139 virtual bool isComplexDeinterleavingSupported() const { return false; }
3140
3141 /// Does this target support complex deinterleaving with the given operation
3142 /// and type?
3143 virtual bool isComplexDeinterleavingOperationSupported(
3144 ComplexDeinterleavingOperation Operation, Type *Ty) const {
3145 return false;
3146 }
3147
3148 /// Create the IR node for the given complex deinterleaving operation.
3149 /// If one cannot be created using all the given inputs, nullptr should be
3150 /// returned.
3151 virtual Value *createComplexDeinterleavingIR(
3152 IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
3153 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3154 Value *Accumulator = nullptr) const {
3155 return nullptr;
3156 }
3157
3158 //===--------------------------------------------------------------------===//
3159 // Runtime Library hooks
3160 //
3161
3162 /// Rename the default libcall routine name for the specified libcall.
3163 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
3164 LibcallRoutineNames[Call] = Name;
3165 }
3166 void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
3167 for (auto Call : Calls)
3168 setLibcallName(Call, Name);
3169 }
3170
3171 /// Get the libcall routine name for the specified libcall.
3172 const char *getLibcallName(RTLIB::Libcall Call) const {
3173 return LibcallRoutineNames[Call];
3174 }
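// Typical use from a target's constructor; the symbol name here is a
// hypothetical example:
//
//   setLibcallName(RTLIB::MEMCPY, "__myos_memcpy");
//   setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::C);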
3175
3176 /// Override the default CondCode to be used to test the result of the
3177 /// comparison libcall against zero.
3178 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
3179 CmpLibcallCCs[Call] = CC;
3180 }
3181
3182 /// Get the CondCode that's to be used to test the result of the comparison
3183 /// libcall against zero.
3184 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
3185 return CmpLibcallCCs[Call];
3186 }
3187
3188 /// Set the CallingConv that should be used for the specified libcall.
3189 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
3190 LibcallCallingConvs[Call] = CC;
3191 }
3192
3193 /// Get the CallingConv that should be used for the specified libcall.
3194 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
3195 return LibcallCallingConvs[Call];
3196 }
3197
3198 /// Execute target specific actions to finalize target lowering.
3199 /// This is used to set extra flags in MachineFrameInformation and to freeze
3200 /// the set of reserved registers.
3201 /// The default implementation just freezes the set of reserved registers.
3202 virtual void finalizeLowering(MachineFunction &MF) const;
3203
3204 //===----------------------------------------------------------------------===//
3205 // GlobalISel Hooks
3206 //===----------------------------------------------------------------------===//
3207 /// Check whether or not \p MI needs to be moved close to its uses.
3208 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3209
3210
3211private:
3212 const TargetMachine &TM;
3213
3214 /// Tells the code generator that the target has multiple (allocatable)
3215 /// condition registers that can be used to store the results of comparisons
3216 /// for use by selects and conditional branches. With multiple condition
3217 /// registers, the code generator will not aggressively sink comparisons into
3218 /// the blocks of their users.
3219 bool HasMultipleConditionRegisters;
3220
3221 /// Tells the code generator that the target has BitExtract instructions.
3222 /// The code generator will aggressively sink "shift"s into the blocks of
3223 /// their users if the users will generate "and" instructions which can be
3224 /// combined with "shift" to BitExtract instructions.
3225 bool HasExtractBitsInsn;
3226
3227 /// Tells the code generator to bypass slow divide or remainder
3228 /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
3229 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3230 /// div/rem when the operands are positive and less than 256.
3231 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
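// A target populates this map from its constructor via addBypassSlowDiv();
// for example (the widths chosen here are illustrative):
//
//   addBypassSlowDiv(32, 8); // sets BypassSlowDivWidths[32] = 8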
3232
3233 /// Tells the code generator that it shouldn't generate extra flow control
3234 /// instructions and should attempt to combine flow control instructions via
3235 /// predication.
3236 bool JumpIsExpensive;
3237
3238 /// Information about the contents of the high-bits in boolean values held in
3239 /// a type wider than i1. See getBooleanContents.
3240 BooleanContent BooleanContents;
3241
3242 /// Information about the contents of the high-bits in boolean values held in
3243 /// a type wider than i1. See getBooleanContents.
3244 BooleanContent BooleanFloatContents;
3245
3246 /// Information about the contents of the high-bits in boolean vector values
3247 /// when the element type is wider than i1. See getBooleanContents.
3248 BooleanContent BooleanVectorContents;
3249
3250 /// The target scheduling preference: shortest possible total cycles or lowest
3251 /// register usage.
3252 Sched::Preference SchedPreferenceInfo;
3253
3254 /// The minimum alignment that any argument on the stack needs to have.
3255 Align MinStackArgumentAlignment;
3256
3257 /// The minimum function alignment (used when optimizing for size, and to
3258 /// prevent explicitly provided alignment from leading to incorrect code).
3259 Align MinFunctionAlignment;
3260
3261 /// The preferred function alignment (used when alignment unspecified and
3262 /// optimizing for speed).
3263 Align PrefFunctionAlignment;
3264
3265 /// The preferred loop alignment (in log2, not in bytes).
3266 Align PrefLoopAlignment;
3267 /// The maximum amount of bytes permitted to be emitted for alignment.
3268 unsigned MaxBytesForAlignment;
3269
3270 /// Size in bits of the maximum atomic operation the backend supports.
3271 /// Accesses larger than this will be expanded by AtomicExpandPass.
3272 unsigned MaxAtomicSizeInBitsSupported;
3273
3274 /// Size in bits of the maximum div/rem size the backend supports.
3275 /// Larger operations will be expanded by ExpandLargeDivRem.
3276 unsigned MaxDivRemBitWidthSupported;
3277
3278 /// Size in bits of the largest fp convert the backend supports.
3279 /// Larger operations will be expanded by ExpandLargeFPConvert.
3280 unsigned MaxLargeFPConvertBitWidthSupported;
3281
3282 /// Size in bits of the minimum cmpxchg or ll/sc operation the
3283 /// backend supports.
3284 unsigned MinCmpXchgSizeInBits;
3285
3286 /// This indicates if the target supports unaligned atomic operations.
3287 bool SupportsUnalignedAtomics;
3288
3289 /// If set to a physical register, this specifies the register that
3290 /// llvm.stacksave/llvm.stackrestore should save and restore.
3291 Register StackPointerRegisterToSaveRestore;
3292
3293 /// This indicates the default register class to use for each ValueType the
3294 /// target supports natively.
3295 const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3296 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3297 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3298
3299 /// This indicates the "representative" register class to use for each
3300 /// ValueType the target supports natively. This information is used by the
3301 /// scheduler to track register pressure. By default, the representative
3302 /// register class is the largest legal super-reg register class of the
3303 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3304 /// representative class would be GR32.
3305 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE];
3306
3307 /// This indicates the "cost" of the "representative" register class for each
3308 /// ValueType. The cost is used by the scheduler to approximate register
3309 /// pressure.
3310 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3311
3312 /// For any value types we are promoting or expanding, this contains the value
3313 /// type that we are changing to. For Expanded types, this contains one step
3314 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3315 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3316 /// the same type (e.g. i32 -> i32).
3317 MVT TransformToType[MVT::VALUETYPE_SIZE];
3318
3319 /// For each operation and each value type, keep a LegalizeAction that
3320 /// indicates how instruction selection should deal with the operation. Most
3321 /// operations are Legal (aka, supported natively by the target), but
3322 /// operations that are not should be described. Note that operations on
3323 /// non-legal value types are not described here.
3324 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3325
3326 /// For each load extension type and each value type, keep a LegalizeAction
3327 /// that indicates how instruction selection should deal with a load of a
3328 /// specific value type and extension type. Uses 4-bits to store the action
3329 /// for each of the 4 load ext types.
3330 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3331
3332 /// For each value type pair keep a LegalizeAction that indicates whether a
3333 /// truncating store of a specific value type and truncating type is legal.
3334 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3335
3336 /// For each indexed mode and each value type, keep a quad of LegalizeAction
3337 /// that indicates how instruction selection should deal with the load /
3338 /// store / maskedload / maskedstore.
3339 ///
3340 /// The first dimension is the value_type for the reference. The second
3341 /// dimension represents the various modes for load store.
3342 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3343
3344 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3345 /// indicates how instruction selection should deal with the condition code.
3346 ///
3347 /// Because each CC action takes up 4 bits, we need to have the array size be
3348 /// large enough to fit all of the value types. This can be done by rounding
3349 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3350 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
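// Worked example of the sizing above: each action takes 4 bits, so one
// uint32_t word holds the actions of 8 value types, and
// (MVT::VALUETYPE_SIZE + 7) / 8 rounds up to the number of words needed per
// condition code.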
3351
3352 ValueTypeActionImpl ValueTypeActions;
3353
3354private:
3355 /// Targets can specify ISD nodes that they would like PerformDAGCombine
3356 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3357 /// array.
3358 unsigned char
3359 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
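// Targets opt in from their constructor, after which DAGCombiner invokes
// PerformDAGCombine for the registered opcodes, e.g.:
//
//   setTargetDAGCombine({ISD::ADD, ISD::SHL});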
3360
3361 /// For operations that must be promoted to a specific type, this holds the
3362 /// destination type. This map should be sparse, so don't hold it as an
3363 /// array.
3364 ///
3365 /// Targets add entries to this map with AddPromotedToType(..), clients access
3366 /// this with getTypeToPromoteTo(..).
3367 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3368 PromoteToType;
3369
3370 /// Stores the name of each libcall.
3371 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
3372
3373 /// The ISD::CondCode that should be used to test the result of each of the
3374 /// comparison libcall against zero.
3375 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3376
3377 /// Stores the CallingConv that should be used for each libcall.
3378 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
3379
3380 /// Set default libcall names and calling conventions.
3381 void InitLibcalls(const Triple &TT);
3382
3383 /// The bits of IndexedModeActions used to store the legalization actions.
3384 /// We store the data as | ML | MS | L | S | with each field taking 4 bits.
3385 enum IndexedModeActionsBits {
3386 IMAB_Store = 0,
3387 IMAB_Load = 4,
3388 IMAB_MaskedStore = 8,
3389 IMAB_MaskedLoad = 12
3390 };
3391
3392 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3393 LegalizeAction Action) {
3394 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3395 (unsigned)Action < 0xf && "Table isn't big enough!");
3396 unsigned Ty = (unsigned)VT.SimpleTy;
3397 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3398 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3399 }
3400
3401 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3402 unsigned Shift) const {
3403 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3404 "Table isn't big enough!");
3405 unsigned Ty = (unsigned)VT.SimpleTy;
3406 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3407 }
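// Worked example of the packing: for a post-incremented load of MVT::i32,
// setIndexedModeAction(ISD::POST_INC, MVT::i32, IMAB_Load, Action) writes
// the 4-bit action into bits 4-7 of IndexedModeActions[i32][POST_INC],
// leaving the store and masked load/store nibbles untouched.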
3408
3409protected:
3410 /// Return true if the extension represented by \p I is free.
3411 /// \pre \p I is a sign, zero, or fp extension and
3412 /// is[Z|FP]ExtFree of the related types is not true.
3413 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3414
3415 /// Depth that GatherAllAliases should continue looking for chain
3416 /// dependencies when trying to find a more preferable chain. As an
3417 /// approximation, this should be more than the number of consecutive stores
3418 /// expected to be merged.
3419 unsigned GatherAllAliasesMaxDepth;
3420
3421 /// \brief Specify maximum number of store instructions per memset call.
3422 ///
3423 /// When lowering \@llvm.memset this field specifies the maximum number of
3424 /// store operations that may be substituted for the call to memset. Targets
3425 /// must set this value based on the cost threshold for that target. Targets
3426 /// should assume that the memset will be done using as many of the largest
3427 /// store operations first, followed by smaller ones, if necessary, per
3428 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3429 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3430 /// store. This only applies to setting a constant array of a constant size.
3431 unsigned MaxStoresPerMemset;
3432 /// Likewise for functions with the OptSize attribute.
3433 unsigned MaxStoresPerMemsetOptSize;
3434
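// Targets tune these thresholds in their constructors; the values below are
// purely illustrative:
//
//   MaxStoresPerMemset = 16;        // allow up to 16 stores normally
//   MaxStoresPerMemsetOptSize = 8;  // be more conservative at -Os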
3435 /// \brief Specify maximum number of store instructions per memcpy call.
3436 ///
3437 /// When lowering \@llvm.memcpy this field specifies the maximum number of
3438 /// store operations that may be substituted for a call to memcpy. Targets
3439 /// must set this value based on the cost threshold for that target. Targets
3440 /// should assume that the memcpy will be done using as many of the largest
3441 /// store operations first, followed by smaller ones, if necessary, per
3442 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3443 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
3444 /// and one 1-byte store. This only applies to copying a constant array of
3445 /// constant size.
3446 unsigned MaxStoresPerMemcpy;
3447 /// Likewise for functions with the OptSize attribute.
3448 unsigned MaxStoresPerMemcpyOptSize;
3449 /// \brief Specify max number of store instructions to glue in inlined memcpy.
3450 ///
3451 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3452 /// of store instructions to keep together. This helps in pairing and
3453 /// vectorization later on.
3454 unsigned MaxGluedStoresPerMemcpy = 0;
3455
3456 /// \brief Specify maximum number of load instructions per memcmp call.
3457 ///
3458 /// When lowering \@llvm.memcmp this field specifies the maximum number of
3459 /// pairs of load operations that may be substituted for a call to memcmp.
3460 /// Targets must set this value based on the cost threshold for that target.
3461 /// Targets should assume that the memcmp will be done using as many of the
3462 /// largest load operations first, followed by smaller ones, if necessary, per
3463 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3464 /// with 32-bit alignment would result in one 4-byte load, one 2-byte load
3465 /// and one 1-byte load. This only applies to comparing a constant array of
3466 /// constant size.
3467 unsigned MaxLoadsPerMemcmp;
3468 /// Likewise for functions with the OptSize attribute.
3469 unsigned MaxLoadsPerMemcmpOptSize;
3470
3471 /// \brief Specify maximum number of store instructions per memmove call.
3472 ///
3473 /// When lowering \@llvm.memmove this field specifies the maximum number of
3474 /// store instructions that may be substituted for a call to memmove. Targets
3475 /// must set this value based on the cost threshold for that target. Targets
3476 /// should assume that the memmove will be done using as many of the largest
3477 /// store operations first, followed by smaller ones, if necessary, per
3478 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3479 /// with 8-bit alignment would result in nine 1-byte stores. This only
3480 /// applies to copying a constant array of constant size.
3481 unsigned MaxStoresPerMemmove;
3482 /// Likewise for functions with the OptSize attribute.
3483 unsigned MaxStoresPerMemmoveOptSize;
3484
3485 /// Tells the code generator that select is more expensive than a branch if
3486 /// the branch is usually predicted right.
3487 bool PredictableSelectIsExpensive;
3488
3489 /// \see enableExtLdPromotion.
3490 bool EnableExtLdPromotion;
3491
3492 /// Return true if the value types that can be represented by the specified
3493 /// register class are all legal.
3494 bool isLegalRC(const TargetRegisterInfo &TRI,
3495 const TargetRegisterClass &RC) const;
3496
3497 /// Replace/modify any TargetFrameIndex operands with a target-dependent
3498 /// sequence of memory operands that is recognized by PrologEpilogInserter.
3499 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3500 MachineBasicBlock *MBB) const;
3501
3502 bool IsStrictFPEnabled;
3503 };
3504
3505/// This class defines information used to lower LLVM code to legal SelectionDAG
3506/// operators that the target instruction selector can accept natively.
3507///
3508/// This class also defines callbacks that targets must implement to lower
3509/// target-specific constructs to SelectionDAG operators.
3510 class TargetLowering : public TargetLoweringBase {
3511 public:
3512 struct DAGCombinerInfo;
3513 struct MakeLibCallOptions;
3514
3515 TargetLowering(const TargetLowering &) = delete;
3516 TargetLowering &operator=(const TargetLowering &) = delete;
3517
3518 explicit TargetLowering(const TargetMachine &TM);
3519
3520 bool isPositionIndependent() const;
3521
3522 virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3523 FunctionLoweringInfo *FLI,
3524 LegacyDivergenceAnalysis *DA) const {
3525 return false;
3526 }
3527
3528 // Lets the target control the following reassociation of operands:
3529 // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
3530 // is y. By default, any case where N0 has a single use is considered
3531 // profitable. This mirrors the condition this target hook call replaced in
3532 // the DAGCombiner. A target can implement its own heuristic to restrict the
3533 // combine.
3534 virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
3535 SDValue N1) const {
3536 return N0.hasOneUse();
3537 }
3538
3539 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3540 return false;
3541 }
3542
3543 /// Returns true (by value) and sets the base pointer, the offset, and the
3544 /// addressing mode (by reference) if the node's address can be legally
3545 /// represented as a pre-indexed load / store address.
3546 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3547 SDValue &/*Offset*/,
3548 ISD::MemIndexedMode &/*AM*/,
3549 SelectionDAG &/*DAG*/) const {
3550 return false;
3551 }
3552
3553 /// Returns true (by value) and sets the base pointer, the offset, and the
3554 /// addressing mode (by reference) if this node can be combined with a load /
3555 /// store to form a post-indexed load / store.
3556 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3557 SDValue &/*Base*/,
3558 SDValue &/*Offset*/,
3559 ISD::MemIndexedMode &/*AM*/,
3560 SelectionDAG &/*DAG*/) const {
3561 return false;
3562 }
3563
3564 /// Returns true if the specified base+offset is a legal indexed addressing
3565 /// mode for this target. \p MI is the load or store instruction that is being
3566 /// considered for transformation.
3567 virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3568 bool IsPre, MachineRegisterInfo &MRI) const {
3569 return false;
3570 }
3571
3572 /// Return the entry encoding for a jump table in the current function. The
3573 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3574 virtual unsigned getJumpTableEncoding() const;
3575
3576 virtual const MCExpr *
3577 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3578 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3579 MCContext &/*Ctx*/) const {
3580 llvm_unreachable("Need to implement this hook if target has custom JTIs");
3581 }
3582
3583 /// Returns relocation base for the given PIC jumptable.
3584 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3585 SelectionDAG &DAG) const;
3586
3587 /// This returns the relocation base for the given PIC jumptable, the same as
3588 /// getPICJumpTableRelocBase, but as an MCExpr.
3589 virtual const MCExpr *
3590 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3591 unsigned JTI, MCContext &Ctx) const;
3592
3593 /// Return true if folding a constant offset with the given GlobalAddress is
3594 /// legal. It is frequently not legal in PIC relocation models.
3595 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3596
3597 /// Return true if the operand with index OpNo corresponds to a target
3598 /// branch in inline asm; for example, in the following case
3599 ///
3600 /// call void asm "lea r8, $0\0A\09call qword ptr ${1:P}\0A\09ret",
3601 /// "*m,*m,~{r8},~{dirflag},~{fpsr},~{flags}"
3602 /// ([9 x i32]* @Arr), void (...)* @sincos_asm)
3603 ///
3604 /// the operand $1 (sincos_asm) is a target branch in inline asm, but the
3605 /// operand $0 (Arr) is not.
3606 virtual bool
3607 isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
3608 unsigned OpNo) const {
3609 return false;
3610 }
3611
3612 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3613 SDValue &Chain) const;
3614
3615 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3616 SDValue &NewRHS, ISD::CondCode &CCCode,
3617 const SDLoc &DL, const SDValue OldLHS,
3618 const SDValue OldRHS) const;
3619
3620 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3621 SDValue &NewRHS, ISD::CondCode &CCCode,
3622 const SDLoc &DL, const SDValue OldLHS,
3623 const SDValue OldRHS, SDValue &Chain,
3624 bool IsSignaling = false) const;
3625
3626 /// Returns a pair of (return value, chain).
3627 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3628 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3629 EVT RetVT, ArrayRef<SDValue> Ops,
3630 MakeLibCallOptions CallOptions,
3631 const SDLoc &dl,
3632 SDValue Chain = SDValue()) const;
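// Sketch of a typical expansion through makeLibCall (types and locals such
// as LHS, RHS, and dl are assumed):
//
//   MakeLibCallOptions CallOptions;
//   SDValue Ops[2] = {LHS, RHS};
//   std::pair<SDValue, SDValue> Res =
//       makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, dl);
//   // Res.first is the return value, Res.second the outgoing chain.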
3633
3634 /// Check whether parameters to a call that are passed in callee saved
3635 /// registers are the same as from the calling function. This needs to be
3636 /// checked for tail call eligibility.
3637 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3638 const uint32_t *CallerPreservedMask,
3639 const SmallVectorImpl<CCValAssign> &ArgLocs,
3640 const SmallVectorImpl<SDValue> &OutVals) const;
3641
3642 //===--------------------------------------------------------------------===//
3643 // TargetLowering Optimization Methods
3644 //
3645
3646 /// A convenience struct that encapsulates a DAG, and two SDValues for
3647 /// returning information from TargetLowering to its clients that want to
3648 /// combine.
3649 struct TargetLoweringOpt {
3650 SelectionDAG &DAG;
3651 bool LegalTys;
3652 bool LegalOps;
3653 SDValue Old;
3654 SDValue New;
3655
3656 explicit TargetLoweringOpt(SelectionDAG &InDAG,
3657 bool LT, bool LO) :
3658 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3659
3660 bool LegalTypes() const { return LegalTys; }
3661 bool LegalOperations() const { return LegalOps; }
3662
3663 bool CombineTo(SDValue O, SDValue N) {
3664 Old = O;
3665 New = N;
3666 return true;
3667 }
3668 };
3669
3670 /// Determines the optimal series of memory ops to replace the memset / memcpy.
3671 /// Return true if the number of memory ops is below the threshold (Limit).
3672 /// Note that this is always the case when Limit is ~0.
3673 /// It returns the types of the sequence of memory ops to perform
3674 /// memset / memcpy by reference.
3675 virtual bool
3676 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3677 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3678 const AttributeList &FuncAttributes) const;
3679
3680 /// Check to see if the specified operand of the specified instruction is a
3681 /// constant integer. If so, check to see if there are any bits set in the
3682 /// constant that are not demanded. If so, shrink the constant and return
3683 /// true.
3684 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3685 const APInt &DemandedElts,
3686 TargetLoweringOpt &TLO) const;
3687
3688 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3689 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3690 TargetLoweringOpt &TLO) const;
3691
3692 // Target hook to do target-specific const optimization, which is called by
3693 // ShrinkDemandedConstant. This function should return true if the target
3694 // doesn't want ShrinkDemandedConstant to further optimize the constant.
3695 virtual bool targetShrinkDemandedConstant(SDValue Op,
3696 const APInt &DemandedBits,
3697 const APInt &DemandedElts,
3698 TargetLoweringOpt &TLO) const {
3699 return false;
3700 }
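// Example of the shrink performed above: in (and %x, 0xFF00) where only
// bits 8-11 are demanded, the constant can be shrunk to 0x0F00; the result
// is unchanged for the demanded bits, and the smaller constant may be
// cheaper to materialize or enable further folds.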
3701
3702 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
3703 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
3704 /// generalized for targets with other types of implicit widening casts.
3705 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
3706 TargetLoweringOpt &TLO) const;
3707
3708 /// Look at Op. At this point, we know that only the DemandedBits bits of the
3709 /// result of Op are ever used downstream. If we can use this information to
3710 /// simplify Op, create a new simplified DAG node and return true, returning
3711 /// the original and new nodes in Old and New. Otherwise, analyze the
3712 /// expression and return a mask of KnownOne and KnownZero bits for the
3713 /// expression (used to simplify the caller). The KnownZero/One bits may only
3714 /// be accurate for those bits in the Demanded masks.
3715 /// \p AssumeSingleUse When this parameter is true, this function will
3716 /// attempt to simplify \p Op even if there are multiple uses.
3717 /// Callers are responsible for correctly updating the DAG based on the
3718 /// results of this function, because simply replacing TLO.Old
3719 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3720 /// has multiple uses.
3721 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3722 const APInt &DemandedElts, KnownBits &Known,
3723 TargetLoweringOpt &TLO, unsigned Depth = 0,
3724 bool AssumeSingleUse = false) const;
3725
3726 /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
3727 /// Adds Op back to the worklist upon success.
3728 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3729 KnownBits &Known, TargetLoweringOpt &TLO,
3730 unsigned Depth = 0,
3731 bool AssumeSingleUse = false) const;
3732
3733 /// Helper wrapper around SimplifyDemandedBits.
3734 /// Adds Op back to the worklist upon success.
3735 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3736 DAGCombinerInfo &DCI) const;
3737
3738 /// Helper wrapper around SimplifyDemandedBits.
3739 /// Adds Op back to the worklist upon success.
3740 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3741 const APInt &DemandedElts,
3742 DAGCombinerInfo &DCI) const;
3743
3744 /// More limited version of SimplifyDemandedBits that can be used to "look
3745 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3746 /// bitwise ops etc.
3747 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3748 const APInt &DemandedElts,
3749 SelectionDAG &DAG,
3750 unsigned Depth = 0) const;
3751
3752 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3753 /// elements.
3754 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3755 SelectionDAG &DAG,
3756 unsigned Depth = 0) const;
3757
3758 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3759 /// bits from only some vector elements.
3760 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
3761 const APInt &DemandedElts,
3762 SelectionDAG &DAG,
3763 unsigned Depth = 0) const;
3764
3765 /// Look at Vector Op. At this point, we know that only the DemandedElts
3766 /// elements of the result of Op are ever used downstream. If we can use
3767 /// this information to simplify Op, create a new simplified DAG node and
3768 /// return true, storing the original and new nodes in TLO.
3769 /// Otherwise, analyze the expression and return a mask of KnownUndef and
3770 /// KnownZero elements for the expression (used to simplify the caller).
3771 /// The KnownUndef/Zero elements may only be accurate for those bits
3772 /// in the DemandedMask.
3773 /// \p AssumeSingleUse When this parameter is true, this function will
3774 /// attempt to simplify \p Op even if there are multiple uses.
3775 /// Callers are responsible for correctly updating the DAG based on the
3776 /// results of this function, because simply replacing TLO.Old
3777 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3778 /// has multiple uses.
3779 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
3780 APInt &KnownUndef, APInt &KnownZero,
3781 TargetLoweringOpt &TLO, unsigned Depth = 0,
3782 bool AssumeSingleUse = false) const;
3783
3784 /// Helper wrapper around SimplifyDemandedVectorElts.
3785 /// Adds Op back to the worklist upon success.
3786 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
3787 DAGCombinerInfo &DCI) const;
3788
3789 /// Return true if the target supports simplifying demanded vector elements by
3790 /// converting them to undefs.
3791 virtual bool
3792 shouldSimplifyDemandedVectorElts(SDValue Op,
3793 const TargetLoweringOpt &TLO) const {
3794 return true;
3795 }
3796
3797 /// Determine which of the bits specified in Mask are known to be either zero
3798 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3799 /// argument allows us to only collect the known bits that are shared by the
3800 /// requested vector elements.
3801 virtual void computeKnownBitsForTargetNode(const SDValue Op,
3802 KnownBits &Known,
3803 const APInt &DemandedElts,
3804 const SelectionDAG &DAG,
3805 unsigned Depth = 0) const;
3806
3807 /// Determine which of the bits specified in Mask are known to be either zero
3808 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3809 /// argument allows us to only collect the known bits that are shared by the
3810 /// requested vector elements. This is for GISel.
3811 virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
3812 Register R, KnownBits &Known,
3813 const APInt &DemandedElts,
3814 const MachineRegisterInfo &MRI,
3815 unsigned Depth = 0) const;
3816
3817 /// Determine the known alignment for the pointer value \p R. This can
3818 /// typically be inferred from the number of low known 0 bits. However, for a
3819 /// pointer with a non-integral address space, the alignment value may be
3820 /// independent from the known low bits.
3821 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
3822 Register R,
3823 const MachineRegisterInfo &MRI,
3824 unsigned Depth = 0) const;
3825
3826 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
3827 /// Default implementation computes low bits based on alignment
3828 /// information. This should preserve known bits passed into it.
3829 virtual void computeKnownBitsForFrameIndex(int FIOp,
3830 KnownBits &Known,
3831 const MachineFunction &MF) const;
3832
3833 /// This method can be implemented by targets that want to expose additional
3834 /// information about sign bits to the DAG Combiner. The DemandedElts
3835 /// argument allows us to only collect the minimum sign bits that are shared
3836 /// by the requested vector elements.
3837 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
3838 const APInt &DemandedElts,
3839 const SelectionDAG &DAG,
3840 unsigned Depth = 0) const;
3841
3842 /// This method can be implemented by targets that want to expose additional
3843 /// information about sign bits to GlobalISel combiners. The DemandedElts
3844 /// argument allows us to only collect the minimum sign bits that are shared
3845 /// by the requested vector elements.
3846 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
3847 Register R,
3848 const APInt &DemandedElts,
3849 const MachineRegisterInfo &MRI,
3850 unsigned Depth = 0) const;
3851
3852 /// Attempt to simplify any target nodes based on the demanded vector
3853 /// elements, returning true on success. Otherwise, analyze the expression and
3854 /// return a mask of KnownUndef and KnownZero elements for the expression
3855 /// (used to simplify the caller). The KnownUndef/Zero elements may only be
3856 /// accurate for those bits in the DemandedMask.
3857 virtual bool SimplifyDemandedVectorEltsForTargetNode(
3858 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
3859 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
3860
3861 /// Attempt to simplify any target nodes based on the demanded bits/elts,
3862 /// returning true on success. Otherwise, analyze the
3863 /// expression and return a mask of KnownOne and KnownZero bits for the
3864 /// expression (used to simplify the caller). The KnownZero/One bits may only
3865 /// be accurate for those bits in the Demanded masks.
3866 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
3867 const APInt &DemandedBits,
3868 const APInt &DemandedElts,
3869 KnownBits &Known,
3870 TargetLoweringOpt &TLO,
3871 unsigned Depth = 0) const;
3872
3873 /// More limited version of SimplifyDemandedBits that can be used to "look
3874 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3875 /// bitwise ops etc.
3876 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
3877 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3878 SelectionDAG &DAG, unsigned Depth) const;
3879
3880 /// Return true if this function can prove that \p Op is never poison
3881 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
3882 /// argument limits the check to the requested vector elements.
3883 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
3884 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
3885 bool PoisonOnly, unsigned Depth) const;
3886
3887 /// Return true if Op can create undef or poison from non-undef & non-poison
3888 /// operands. The DemandedElts argument limits the check to the requested
3889 /// vector elements.
3890 virtual bool
3891 canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
3892 const SelectionDAG &DAG, bool PoisonOnly,
3893 bool ConsiderFlags, unsigned Depth) const;
3894
3895 /// Tries to build a legal vector shuffle using the provided parameters
3897 /// or equivalent variations. The Mask argument may be modified as the
3897 /// function tries different variations.
3898 /// Returns an empty SDValue if the operation fails.
3899 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
3900 SDValue N1, MutableArrayRef<int> Mask,
3901 SelectionDAG &DAG) const;
3902
3903 /// This method returns the constant pool value that will be loaded by LD.
3904 /// NOTE: You must check for implicit extensions of the constant by LD.
3905 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
3906
3907 /// If \p SNaN is false, \returns true if \p Op is known to never be any
3908 /// NaN. If \p SNaN is true, \returns true if \p Op is known to never be a
3909 /// signaling NaN.
3910 virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
3911 const SelectionDAG &DAG,
3912 bool SNaN = false,
3913 unsigned Depth = 0) const;
3914
3915 /// Return true if vector \p Op has the same value across all \p DemandedElts,
3916 /// indicating any elements which may be undef in the output \p UndefElts.
3917 virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
3918 APInt &UndefElts,
3919 const SelectionDAG &DAG,
3920 unsigned Depth = 0) const;
3921
3922 /// Returns true if the given Opc is considered a canonical constant for the
3923 /// target, which should not be transformed back into a BUILD_VECTOR.
3924 virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
3925 return Op.getOpcode() == ISD::SPLAT_VECTOR;
3926 }
3927
3928 struct DAGCombinerInfo {
3929 void *DC; // The DAG Combiner object.
3930 CombineLevel Level;
3931 bool CalledByLegalizer;
3932
3933 public:
3934 SelectionDAG &DAG;
3935
3936 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
3937 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
3938
3939 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
3940 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
3941 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
3942 CombineLevel getDAGCombineLevel() { return Level; }
3943 bool isCalledByLegalizer() const { return CalledByLegalizer; }
3944
3945 void AddToWorklist(SDNode *N);
3946 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
3947 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
3948 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
3949
3950 bool recursivelyDeleteUnusedNodes(SDNode *N);
3951
3952 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
3953 };
3954
3955 /// Return true if \p N is a constant or constant vector equal to the true
3956 /// value from getBooleanContents().
3957 bool isConstTrueVal(SDValue N) const;
3958
3959 /// Return true if \p N is a constant or constant vector equal to the false
3960 /// value from getBooleanContents().
3961 bool isConstFalseVal(SDValue N) const;
3962
3963 /// Return if \p N is a True value when extended to \p VT.
3964 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
3965
3966 /// Try to simplify a setcc built with the specified operands and cc. If it is
3967 /// unable to simplify it, return a null SDValue.
3968 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
3969 bool foldBooleans, DAGCombinerInfo &DCI,
3970 const SDLoc &dl) const;
3971
3972 // For targets which wrap addresses, unwrap for analysis.
3973 virtual SDValue unwrapAddress(SDValue N) const { return N; }
3974
3975 /// Returns true (and the GlobalValue and the offset) if the node is a
3976 /// GlobalAddress + offset.
3977 virtual bool
3978 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
3979
3980 /// This method will be invoked for all target nodes and for any
3981 /// target-independent nodes that the target has registered with invoke it
3982 /// for.
3983 ///
3984 /// The semantics are as follows:
3985 /// Return Value:
3986 /// SDValue.Val == 0 - No change was made
3987 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
3988 /// otherwise - N should be replaced by the returned Operand.
3989 ///
3990 /// In addition, methods provided by DAGCombinerInfo may be used to perform
3991 /// more complex transformations.
3992 ///
3993 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
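// Typical shape of a target override (sketch; MyTargetLowering is an
// assumed target):
//
//   SDValue
//   MyTargetLowering::PerformDAGCombine(SDNode *N,
//                                       DAGCombinerInfo &DCI) const {
//     SelectionDAG &DAG = DCI.DAG;
//     if (DCI.isBeforeLegalize())
//       return SDValue(); // no change yet; wait for legal types
//     switch (N->getOpcode()) {
//     case ISD::ADD:
//       // match a target pattern and return a replacement, or fall through
//       break;
//     }
//     return SDValue(); // no change was made
//   }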
3994
3995 /// Return true if it is profitable to move this shift by a constant amount
3996 /// through its operand, adjusting any immediate operands as necessary to
3997 /// preserve semantics. This transformation may not be desirable if it
3998 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
3999 /// extraction in AArch64). By default, it returns true.
4000 ///
4001 /// @param N the shift node
4002 /// @param Level the current DAGCombine legalization level.
4003 virtual bool isDesirableToCommuteWithShift(const SDNode *N,
4004 CombineLevel Level) const {
4005 return true;
4006 }
4007
4008 /// Return true if it is profitable to combine an XOR of a logical shift
4009 /// to create a logical shift of NOT. This transformation may not be desirable
4010 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4011 /// BIC on ARM/AArch64). By default, it returns true.
4012 virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
4013 return true;
4014 }
4015
4016 /// Return true if the target has native support for the specified value type
4017 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
4018 /// i16 is legal, but undesirable since i16 instruction encodings are longer
4019 /// and some i16 instructions are slow.
4020 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4021 // By default, assume all legal types are desirable.
4022 return isTypeLegal(VT);
4023 }
4024
4025 /// Return true if it is profitable for the DAG combiner to transform a
4026 /// floating-point op of the specified opcode to an equivalent op of an
4027 /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
4028 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
4029 EVT /*VT*/) const {
4030 return false;
4031 }
4032
4033 /// This method queries the target whether it is beneficial for the DAG
4034 /// combiner to promote the specified node. If true, it should return the
4035 /// desired promotion type by reference.
4036 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
4037 return false;
4038 }
4039
4040 /// Return true if the target supports swifterror attribute. It optimizes
4041 /// loads and stores to reading and writing a specific register.
4042 virtual bool supportSwiftError() const {
4043 return false;
4044 }
4045
4046 /// Return true if the target supports that a subset of CSRs for the given
4047 /// machine function is handled explicitly via copies.
4048 virtual bool supportSplitCSR(MachineFunction *MF) const {
4049 return false;
4050 }
4051
4052 /// Return true if the target supports kcfi operand bundles.
4053 virtual bool supportKCFIBundles() const { return false; }
4054
4055 /// Perform necessary initialization to handle a subset of CSRs explicitly
4056 /// via copies. This function is called at the beginning of instruction
4057 /// selection.
4058 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
4059 llvm_unreachable("Not Implemented");
4060 }
4061
4062 /// Insert explicit copies in entry and exit blocks. We copy a subset of
4063 /// CSRs to virtual registers in the entry block, and copy them back to
4064 /// physical registers in the exit blocks. This function is called at the end
4065 /// of instruction selection.
4066 virtual void insertCopiesSplitCSR(
4067 MachineBasicBlock *Entry,
4068 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
4069 llvm_unreachable("Not Implemented");
4070 }
4071
4072 /// Return the newly negated expression if the cost is not expensive and
4073 /// set the cost in \p Cost to indicate that if it is cheaper or neutral to
4074 /// do the negation.
4075 SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
4076 bool LegalOps, bool OptForSize,
4077 NegatibleCost &Cost,
4078 unsigned Depth = 0) const;
4079
4081 SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
4083 unsigned Depth = 0) const {
4085 SDValue Neg =
4086 getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4087 if (!Neg)
4088 return SDValue();
4089
4090 if (Cost <= CostThreshold)
4091 return Neg;
4092
4093 // Remove the newly created node to avoid the side effect on the DAG.
4094 if (Neg->use_empty())
4095 DAG.RemoveDeadNode(Neg.getNode());
4096 return SDValue();
4097 }
4098
4099 /// This is the helper function to return the newly negated expression only
4100 /// when the cost is cheaper.
4101 SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
4102 bool LegalOps, bool OptForSize,
4103 unsigned Depth = 0) const {
4104 return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
4105 NegatibleCost::Cheaper, Depth);
4106 }
4107
4108 /// This is the helper function to return the newly negated expression if
4109 /// the cost is not expensive.
4110 SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
4111 bool OptForSize, unsigned Depth = 0) const {
4112 NegatibleCost Cost = NegatibleCost::Expensive;
4113 return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4114 }
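// Sketch of a typical combine using these helpers (locals X, Y, DL, VT,
// LegalOps, and OptForSize assumed): fold (fsub Y, X) to (fadd Y, (fneg X))
// only when the negation is cheaper to materialize:
//
//   if (SDValue NegX =
//           getCheaperNegatedExpression(X, DAG, LegalOps, OptForSize))
//     return DAG.getNode(ISD::FADD, DL, VT, Y, NegX);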
4115
4116 //===--------------------------------------------------------------------===//
4117 // Lowering methods - These methods must be implemented by targets so that
4118 // the SelectionDAGBuilder code knows how to lower these.
4119 //
4120
4121 /// Target-specific splitting of values into parts that fit a register
4122 /// storing a legal type.
4123 virtual bool splitValueIntoRegisterParts(
4124 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4125 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
4126 return false;
4127 }
4128
4129 /// Allows the target to handle physreg-carried dependencies
4130 /// in a target-specific way. Used from the ScheduleDAGSDNodes to decide
4131 /// whether to add the edge to the dependency graph.
4132 /// Def - input: Selection DAG node defining the physical register
4133 /// User - input: Selection DAG node using the physical register
4134 /// Op - input: Number of User operand
4135 /// PhysReg - inout: set to the physical register if the edge is
4136 /// necessary, unchanged otherwise
4137 /// Cost - inout: physical register copy cost.
4138 /// Returns 'true' if the edge is necessary, 'false' otherwise
4139 virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
4140 const TargetRegisterInfo *TRI,
4141 const TargetInstrInfo *TII,
4142 unsigned &PhysReg, int &Cost) const {
4143 return false;
4144 }
4145
4146 /// Target-specific combining of register parts into its original value
4147 virtual SDValue
4148 joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
4149 const SDValue *Parts, unsigned NumParts,
4150 MVT PartVT, EVT ValueVT,
4151 std::optional<CallingConv::ID> CC) const {
4152 return SDValue();
4153 }
4154
4155 /// This hook must be implemented to lower the incoming (formal) arguments,
4156 /// described by the Ins array, into the specified DAG. The implementation
4157 /// should fill in the InVals array with legal-type argument values, and
4158 /// return the resulting token chain value.
4159 virtual SDValue LowerFormalArguments(
4160 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
4161 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
4162 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
4163 llvm_unreachable("Not Implemented");
4164 }
4165
4166 /// This structure contains all information that is necessary for lowering
4167 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
4168 /// needs to lower a call, and targets will see this struct in their LowerCall
4169 /// implementation.
4170 struct CallLoweringInfo {
4171 SDValue Chain;
4172 Type *RetTy = nullptr;
4173 bool RetSExt : 1;
4174 bool RetZExt : 1;
4175 bool IsVarArg : 1;
4176 bool IsInReg : 1;
4177 bool DoesNotReturn : 1;
4178 bool IsReturnValueUsed : 1;
4179 bool IsConvergent : 1;
4180 bool IsPatchPoint : 1;
4181 bool IsPreallocated : 1;
4182 bool NoMerge : 1;
4183
4184 // IsTailCall should be modified by implementations of
4185 // TargetLowering::LowerCall that perform tail call conversions.
4186 bool IsTailCall = false;
4187
4188 // Is Call lowering done post SelectionDAG type legalization.
4189 bool IsPostTypeLegalization = false;
4190
4191 unsigned NumFixedArgs = -1;
4192 CallingConv::ID CallConv = CallingConv::C;
4193 SDValue Callee;
4194 ArgListTy Args;
4195 SelectionDAG &DAG;
4196 SDLoc DL;
4197 const CallBase *CB = nullptr;
4198 SmallVector<ISD::OutputArg, 32> Outs;
4199 SmallVector<SDValue, 32> OutVals;
4200 SmallVector<ISD::InputArg, 32> Ins;
4201 SmallVector<SDValue, 4> InVals;
4202 const ConstantInt *CFIType = nullptr;
4203
4204 CallLoweringInfo(SelectionDAG &DAG)
4205 : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
4206 DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
4207 IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
4208 DAG(DAG) {}
4209
4210 CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
4211 DL = dl;
4212 return *this;
4213 }
4214
4215 CallLoweringInfo &setChain(SDValue InChain) {
4216 Chain = InChain;
4217 return *this;
4218 }
4219
4220 // setCallee with target/module-specific attributes
4221 CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
4222 SDValue Target, ArgListTy &&ArgsList) {
4223 RetTy = ResultType;
4224 Callee = Target;
4225 CallConv = CC;
4226 NumFixedArgs = ArgsList.size();
4227 Args = std::move(ArgsList);
4228
4229 DAG.getTargetLoweringInfo().markLibCallAttributes(
4230 &(DAG.getMachineFunction()), CC, Args);
4231 return *this;
4232 }
4233
4234 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
4235 SDValue Target, ArgListTy &&ArgsList) {
4236 RetTy = ResultType;
4237 Callee = Target;
4238 CallConv = CC;
4239 NumFixedArgs = ArgsList.size();
4240 Args = std::move(ArgsList);
4241 return *this;
4242 }
4243
4244 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
4245 SDValue Target, ArgListTy &&ArgsList,
4246 const CallBase &Call) {
4247 RetTy = ResultType;
4248
4249 IsInReg = Call.hasRetAttr(Attribute::InReg);
4250 DoesNotReturn =
4251 Call.doesNotReturn() ||
4252 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
4253 IsVarArg = FTy->isVarArg();
4254 IsReturnValueUsed = !Call.use_empty();
4255 RetSExt = Call.hasRetAttr(Attribute::SExt);
4256 RetZExt = Call.hasRetAttr(Attribute::ZExt);
4257 NoMerge = Call.hasFnAttr(Attribute::NoMerge);
4258
4259 Callee = Target;
4260
4261 CallConv = Call.getCallingConv();
4262 NumFixedArgs = FTy->getNumParams();
4263 Args = std::move(ArgsList);
4264
4265 CB = &Call;
4266
4267 return *this;
4268 }
4269
4270 CallLoweringInfo &setInRegister(bool Value = true) {
4271 IsInReg = Value;
4272 return *this;
4273 }
4274
4275 CallLoweringInfo &setNoReturn(bool Value = true) {
4276 DoesNotReturn = Value;
4277 return *this;
4278 }
4279
4280 CallLoweringInfo &setVarArg(bool Value = true) {
4281 IsVarArg = Value;
4282 return *this;
4283 }
4284
4285 CallLoweringInfo &setTailCall(bool Value = true) {
4286 IsTailCall = Value;
4287 return *this;
4288 }
4289
4290 CallLoweringInfo &setDiscardResult(bool Value = true) {
4291 IsReturnValueUsed = !Value;
4292 return *this;
4293 }
4294
4295 CallLoweringInfo &setConvergent(bool Value = true) {
4296 IsConvergent = Value;
4297 return *this;
4298 }
4299
4300 CallLoweringInfo &setSExtResult(bool Value = true) {
4301 RetSExt = Value;
4302 return *this;
4303 }
4304
4305 CallLoweringInfo &setZExtResult(bool Value = true) {
4306 RetZExt = Value;
4307 return *this;
4308 }
4309
4310 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
4311 IsPatchPoint = Value;
4312 return *this;
4313 }
4314
4315 CallLoweringInfo &setIsPreallocated(bool Value = true) {
4316 IsPreallocated = Value;
4317 return *this;
4318 }
4319
4320 CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
4321 IsPostTypeLegalization = Value;
4322 return *this;
4323 }
4324
4325 CallLoweringInfo &setCFIType(const ConstantInt *Type) {
4326 CFIType = Type;
4327 return *this;
4328 }
4329
4330 ArgListTy &getArgs() {
4331 return Args;
4332 }
4333 };
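// The setters above chain fluently when lowering a call (sketch; locals
// such as dl, Chain, RetTy, Callee, and Args are assumed):
//
//   TargetLowering::CallLoweringInfo CLI(DAG);
//   CLI.setDebugLoc(dl)
//       .setChain(Chain)
//       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
//       .setTailCall(false);
//   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);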
4334
4335 /// This structure is used to pass arguments to the makeLibCall function.
4336 struct MakeLibCallOptions {
4337 // By passing type list before soften to makeLibCall, the target hook
4338 // shouldExtendTypeInLibCall can get the original type before soften.
4339 ArrayRef<EVT> OpsVTBeforeSoften;
4340 EVT RetVTBeforeSoften;
4341 bool IsSExt : 1;
4342 bool DoesNotReturn : 1;
4343 bool IsReturnValueUsed : 1;
4344 bool IsPostTypeLegalization : 1;
4345 bool IsSoften : 1;
4346
4347 MakeLibCallOptions()
4348 : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
4349 IsPostTypeLegalization(false), IsSoften(false) {}
4350
4351 MakeLibCallOptions &setSExt(bool Value = true) {
4352 IsSExt = Value;
4353 return *this;
4354 }
4355
4356 MakeLibCallOptions &setNoReturn(bool Value = true) {
4357 DoesNotReturn = Value;
4358 return *this;
4359 }
4360
4361 MakeLibCallOptions &setDiscardResult(bool Value = true) {
4362 IsReturnValueUsed = !Value;
4363 return *this;
4364 }
4365
4366 MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
4367 IsPostTypeLegalization = Value;
4368 return *this;
4369 }
4370
4371 MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
4372 bool Value = true) {
4373 OpsVTBeforeSoften = OpsVT;
4374 RetVTBeforeSoften = RetVT;
4375 IsSoften = Value;
4376 return *this;
4377 }
4378 };
4379
4380 /// This function lowers an abstract call to a function into an actual call.
4381 /// This returns a pair of operands. The first element is the return value
4382 /// for the function (if RetTy is not VoidTy). The second element is the
4383 /// outgoing token chain. It calls LowerCall to do the actual lowering.
4384 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
4385