//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class BranchProbability;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LegacyDivergenceAnalysis;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize    // Linearize DAG, no scheduling
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
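
// Illustrative usage (an editorial sketch, not part of the original header):
// querying a non-volatile 16-byte copy whose destination alignment is still
// free to change.
//
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/true,
//                          /*DstAlign=*/Align(4), /*SrcAlign=*/Align(8),
//                          /*IsVolatile=*/false);
//   Op.allowOverlap();          // true: overlapping stores are permitted.
//   Op.isSrcAligned(Align(8));  // true: the inferred source alignment is 8.
//   Op.isFixedDstAlign();       // false: the target may raise DstAlign.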

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,    // Don't expand the instruction.
    LLSC,    // Expand the instruction into load-linked/store-conditional; used
             // by ARM/AArch64.
    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
             // greater atomic guarantees than a normal load.
    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = None;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults to
  /// the pointer type from the data layout. FIXME: The default needs to be
  /// removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// EVT is not used in-tree, but is used by out-of-tree targets.
  /// Documentation for this function would be nice...
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI,
                                                  const DataLayout &DL) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
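
  // Illustrative sketch (editorial, not part of the original header): a
  // target whose 64-bit divider is slow can register a 32-bit bypass in its
  // TargetLowering constructor via the protected setter addBypassSlowDiv()
  // declared further down in this class (x86 does this on some subtargets):
  //
  //   addBypassSlowDiv(64, 32); // try a 32-bit divide when operands fit
  //
  // CodeGenPrepare then consults isSlowDivBypassed() and
  // getBypassSlowDivWidths() to emit the runtime-guarded narrow division.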

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const {
    // Don't do if we could do an indexed load on the original type, but not on
    // the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later, as
    // doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    bool Fast = false;
    return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
                              MMO, &Fast) && Fast;
  }

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const SelectionDAG &DAG) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in code
  /// gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }
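
  // Illustrative override (editorial sketch; "MyTargetLowering" is a
  // hypothetical backend, not part of LLVM): a target with a flag-setting
  // and-not instruction, in the spirit of AArch64's BICS, might enable the
  // transform only for its legal scalar types:
  //
  //   bool MyTargetLowering::hasAndNotCompare(SDValue Y) const override {
  //     EVT VT = Y.getValueType();
  //     return VT == MVT::i32 || VT == MVT::i64;
  //   }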

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x & (-1 << y) (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProactivelyFoldVariableMask(SDValue X, ConstantSDNode *XC,
                                                 ConstantSDNode *CC, SDValue Y,
                                                 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
                                                 SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }
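
  // Illustrative usage (editorial sketch, not part of the original header):
  // a DAG combine widening an i1-style condition picks the extension opcode
  // that matches the target's declared boolean representation:
  //
  //   ISD::NodeType Ext = getExtendForContent(getBooleanContents(VT));
  //   // ZeroOrOneBooleanContent         -> ISD::ZERO_EXTEND
  //   // ZeroOrNegativeOneBooleanContent -> ISD::SIGN_EXTEND (typical for
  //   //                                    vector masks)
  //   // UndefinedBooleanContent         -> ISD::ANY_EXTEND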

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
  /// instructions, and false if a library call is preferred (e.g for code-size
  /// reasons).
  virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
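
  // Worked example (editorial, not part of the original header): on a 32-bit
  // target whose widest integer register is i32, i64 has action
  // TypeExpandInteger, so getTypeToExpandTo(Ctx, MVT::i64) steps i64 -> i32
  // and returns i32; for i128 the loop walks i128 -> i64 -> i32.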

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  virtual unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                          EVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }
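
  // Illustrative call (editorial sketch): breaking v8f32 down on a target
  // where only v4f32 is legal, per the example in the comment above.
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, IntermediateVT == v4f32, RegisterVT == v4f32.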

  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }
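
  // Illustrative usage (editorial sketch): a target constructor records how
  // an operation legalizes via the protected setter setOperationAction()
  // declared further down in this class, and legalization queries it here:
  //
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);   // in the target ctor
  //   ...
  //   if (getOperationAction(ISD::SDIV, MVT::i32) == Expand)
  //     ; // the legalizer will rewrite SDIV in terms of other nodes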

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
    default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
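
  // Worked example (editorial, not part of the original header): with 64-bit
  // index width, case values spanning [10, 73] give
  // Range = (73 - 10) + 1 = 64 <= 64, so the range still fits in one word.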

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases, \p Range range of values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
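
  // Worked example (editorial): a switch whose three cases 5, 9, and 12 all
  // jump to one block gives NumDests = 1, NumCmps = 3, and [5,12] fits in a
  // word, so the profitability check above accepts it; the three compares
  // collapse into a single mask test against (1<<5 | 1<<9 | 1<<12).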

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
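
  // Note on the encoding (editorial): LoadExtActions packs one 4-bit
  // LegalizeAction per extension kind into a single integer, so
  // "4 * ExtType" selects the nibble for that kind. For example,
  //
  //   getLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8)
  //
  // reads the field describing an i8 -> i32 sign-extending load.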

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation has a solution on this
  /// target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
           (getTruncStoreAction(ValVT, MemVT) == Legal ||
            getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                                    bool LegalOnly) const {
    if (LegalOnly)
      return isTruncStoreLegal(ValVT, MemVT);

    return isTruncStoreLegalOrCustom(ValVT, MemVT);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal on this
  /// target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Returns true if the index type for a masked gather/scatter requires
  /// extending
  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }

  // Returns true if VT is a legal index type for masked gathers/scatters
  // on this target
  virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const { return false; }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
        PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
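
  // Illustrative sketch (editorial): if a target marks an operation Promote
  // without an explicit AddPromotedToType() entry, the loop above walks the
  // MVT list upward. E.g. if ISD::CTLZ is Promote for i8, and i32 is the
  // first larger legal type whose CTLZ action is not also Promote, then
  // getTypeToPromoteTo(ISD::CTLZ, MVT::i8) returns i32.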

  virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                                     bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }
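
  // Worked examples (editorial): on a target with 64-bit pointers,
  //   getValueType(DL, i32)        -> MVT::i32
  //   getValueType(DL, i8*)        -> MVT::i64 (the native pointer type)
  //   getValueType(DL, <4 x i8*>)  -> v4i64 (pointers lowered per element)
  // A struct type asserts unless AllowUnknown is true, in which case it
  // yields MVT::Other.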
1427 
1429  bool AllowUnknown = false) const {
1430  // Lower scalar pointers to native pointer types.
1431  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
1432  return getPointerMemTy(DL, PTy->getAddressSpace());
1433  else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1434  Type *Elm = VTy->getElementType();
1435  if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
1436  EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
1437  Elm = PointerTy.getTypeForEVT(Ty->getContext());
1438  }
1439  return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
1440  VTy->getElementCount());
1441  }
1442 
1443  return getValueType(DL, Ty, AllowUnknown);
1444  }
1445 
1446 
1447  /// Return the MVT corresponding to this LLVM type. See getValueType.
1449  bool AllowUnknown = false) const {
1450  return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1451  }
1452 
1453  /// Return the desired alignment for ByVal or InAlloca aggregate function
1454  /// arguments in the caller parameter area. This is the actual alignment, not
1455  /// its logarithm.
1456  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1457 
1458  /// Return the type of registers that this ValueType will eventually require.
1459  MVT getRegisterType(MVT VT) const {
1460  assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
1461  return RegisterTypeForVT[VT.SimpleTy];
1462  }
1463 
1464  /// Return the type of registers that this ValueType will eventually require.
1465  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1466  if (VT.isSimple()) {
1467  assert((unsigned)VT.getSimpleVT().SimpleTy <
1468  array_lengthof(RegisterTypeForVT));
1469  return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
1470  }
1471  if (VT.isVector()) {
1472  EVT VT1;
1473  MVT RegisterVT;
1474  unsigned NumIntermediates;
1475  (void)getVectorTypeBreakdown(Context, VT, VT1,
1476  NumIntermediates, RegisterVT);
1477  return RegisterVT;
1478  }
1479  if (VT.isInteger()) {
1480  return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1481  }
1482  llvm_unreachable("Unsupported extended type!");
1483  }
1484 
1485  /// Return the number of registers that this ValueType will eventually
1486  /// require.
1487  ///
1488  /// This is one for any types promoted to live in larger registers, but may be
1489  /// more than one for types (like i64) that are split into pieces. For types
1490  /// like i140, which are first promoted then expanded, it is the number of
1491  /// registers needed to hold all the bits of the original type. For an i140
1492  /// on a 32-bit machine this means 5 registers.
1493  ///
1494  /// RegisterVT may be passed as a way to override the default settings, for
1495  /// instance with i128 inline assembly operands on SystemZ.
1496  virtual unsigned
1497  getNumRegisters(LLVMContext &Context, EVT VT,
1498  Optional<MVT> RegisterVT = None) const {
1499  if (VT.isSimple()) {
1500  assert((unsigned)VT.getSimpleVT().SimpleTy <
1501  array_lengthof(NumRegistersForVT));
1502  return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1503  }
1504  if (VT.isVector()) {
1505  EVT VT1;
1506  MVT VT2;
1507  unsigned NumIntermediates;
1508  return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1509  }
1510  if (VT.isInteger()) {
1511  unsigned BitWidth = VT.getSizeInBits();
1512  unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1513  return (BitWidth + RegWidth - 1) / RegWidth;
1514  }
1515  llvm_unreachable("Unsupported extended type!");
1516  }
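// Worked example for the i140 case described above, on a 32-bit target:
// the integer branch computes
//   ceil(140 / 32) = (140 + 31) / 32 = 5 registers.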
1517 
1518  /// Certain combinations of ABIs, Targets and features require that types
1519  /// are legal for some operations and not for other operations.
1520  /// For MIPS all vector types must be passed through the integer register set.
1521  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1522  CallingConv::ID CC, EVT VT) const {
1523  return getRegisterType(Context, VT);
1524  }
1525 
1526  /// Certain targets require unusual breakdowns of certain types. For MIPS,
1527  /// this occurs when a vector type is used, as vectors are passed through the
1528  /// integer register set.
1529  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1530  CallingConv::ID CC,
1531  EVT VT) const {
1532  return getNumRegisters(Context, VT);
1533  }
1534 
1535  /// Certain targets have context sensitive alignment requirements, where one
1536  /// type has the alignment requirement of another type.
1537  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1538  const DataLayout &DL) const {
1539  return DL.getABITypeAlign(ArgTy);
1540  }
1541 
1542  /// If true, then instruction selection should seek to shrink the FP constant
1543  /// of the specified type to a smaller type in order to save space and / or
1544  /// reduce runtime.
1545  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1546 
1547  /// Return true if it is profitable to reduce a load to a smaller type.
1548  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
1549  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1550  EVT NewVT) const {
1551  // By default, assume that it is cheaper to extract a subvector from a wide
1552  // vector load rather than creating multiple narrow vector loads.
1553  if (NewVT.isVector() && !Load->hasOneUse())
1554  return false;
1555 
1556  return true;
1557  }
1558 
1559  /// When splitting a value of the specified type into parts, does the Lo
1560  /// or Hi part come first? This usually follows the endianness, except
1561  /// for ppcf128, where the Hi part always comes first.
1562  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1563  return DL.isBigEndian() || VT == MVT::ppcf128;
1564  }
1565 
1566  /// If true, the target has custom DAG combine transformations that it can
1567  /// perform for the specified node.
1568  bool hasTargetDAGCombine(ISD::NodeType NT) const {
1569  assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1570  return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1571  }
1572 
1573  unsigned getGatherAllAliasesMaxDepth() const {
1574  return GatherAllAliasesMaxDepth;
1575  }
1576 
1577  /// Returns the size of the platform's va_list object.
1578  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1579  return getPointerTy(DL).getSizeInBits();
1580  }
1581 
1582  /// Get maximum # of store operations permitted for llvm.memset
1583  ///
1584  /// This function returns the maximum number of store operations permitted
1585  /// to replace a call to llvm.memset. The value is set by the target at the
1586  /// performance threshold for such a replacement. If OptSize is true,
1587  /// return the limit for functions that have OptSize attribute.
1588  unsigned getMaxStoresPerMemset(bool OptSize) const {
1589  return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1590  }
1591 
1592  /// Get maximum # of store operations permitted for llvm.memcpy
1593  ///
1594  /// This function returns the maximum number of store operations permitted
1595  /// to replace a call to llvm.memcpy. The value is set by the target at the
1596  /// performance threshold for such a replacement. If OptSize is true,
1597  /// return the limit for functions that have OptSize attribute.
1598  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1599  return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1600  }
1601 
1602  /// \brief Get maximum # of store operations to be glued together
1603  ///
1604  /// This function returns the maximum number of store operations permitted
1605  /// to glue together during lowering of llvm.memcpy. The value is set by
1606  /// the target at the performance threshold for such a replacement.
1607  virtual unsigned getMaxGluedStoresPerMemcpy() const {
1608  return MaxGluedStoresPerMemcpy;
1609  }
1610 
1611  /// Get maximum # of load operations permitted for memcmp
1612  ///
1613  /// This function returns the maximum number of load operations permitted
1614  /// to replace a call to memcmp. The value is set by the target at the
1615  /// performance threshold for such a replacement. If OptSize is true,
1616  /// return the limit for functions that have OptSize attribute.
1617  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1618  return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1619  }
1620 
1621  /// Get maximum # of store operations permitted for llvm.memmove
1622  ///
1623  /// This function returns the maximum number of store operations permitted
1624  /// to replace a call to llvm.memmove. The value is set by the target at the
1625  /// performance threshold for such a replacement. If OptSize is true,
1626  /// return the limit for functions that have OptSize attribute.
1627  unsigned getMaxStoresPerMemmove(bool OptSize) const {
1628  return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1629  }
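// A minimal sketch (hypothetical threshold values, not from any in-tree
// target) of how a backend tunes these limits in its constructor; the
// MaxStoresPer* fields are protected members assigned directly:
//
//   MaxStoresPerMemset = 16;        // inline memset up to 16 stores
//   MaxStoresPerMemsetOptSize = 4;  // tighter budget under -Os
//   MaxStoresPerMemcpy = 8;
//   MaxStoresPerMemmove = 8;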
1630 
1631  /// Determine if the target supports unaligned memory accesses.
1632  ///
1633  /// This function returns true if the target allows unaligned memory accesses
1634  /// of the specified type in the given address space. If true, it also returns
1635  /// whether the unaligned memory access is "fast" in the last argument by
1636  /// reference. This is used, for example, in situations where an array
1637  /// copy/move/set is converted to a sequence of store operations. Its use
1638  /// helps to ensure that such replacements don't generate code that causes an
1639  /// alignment error (trap) on the target machine.
1640  virtual bool allowsMisalignedMemoryAccesses(
1641  EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1642  MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1643  bool * /*Fast*/ = nullptr) const {
1644  return false;
1645  }
1646 
1647  /// LLT handling variant.
1648  virtual bool allowsMisalignedMemoryAccesses(
1649  LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1650  MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1651  bool * /*Fast*/ = nullptr) const {
1652  return false;
1653  }
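// A minimal sketch of an override, assuming a hypothetical core on which
// unaligned scalar accesses up to 64 bits are supported and fast:
//
//   bool MyTargetLowering::allowsMisalignedMemoryAccesses(
//       EVT VT, unsigned AddrSpace, Align Alignment,
//       MachineMemOperand::Flags Flags, bool *Fast) const {
//     if (!VT.isVector() && VT.getSizeInBits() <= 64) {
//       if (Fast)
//         *Fast = true; // no cycle penalty on this hypothetical core
//       return true;
//     }
//     return false; // let the legalizer split the access
//   }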
1654 
1655  /// This function returns true if the memory access is aligned or if the
1656  /// target allows this specific unaligned memory access. If the access is
1657  /// allowed, the optional final parameter returns if the access is also fast
1658  /// (as defined by the target).
1659  bool allowsMemoryAccessForAlignment(
1660  LLVMContext &Context, const DataLayout &DL, EVT VT,
1661  unsigned AddrSpace = 0, Align Alignment = Align(1),
1662  MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1663  bool *Fast = nullptr) const;
1664 
1665  /// Return true if the memory access of this type is aligned or if the target
1666  /// allows this specific unaligned access for the given MachineMemOperand.
1667  /// If the access is allowed, the optional final parameter returns if the
1668  /// access is also fast (as defined by the target).
1669  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1670  const DataLayout &DL, EVT VT,
1671  const MachineMemOperand &MMO,
1672  bool *Fast = nullptr) const;
1673 
1674  /// Return true if the target supports a memory access of this type for the
1675  /// given address space and alignment. If the access is allowed, the optional
1676  /// final parameter returns if the access is also fast (as defined by the
1677  /// target).
1678  virtual bool
1679  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1680  unsigned AddrSpace = 0, Align Alignment = Align(1),
1681  MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1682  bool *Fast = nullptr) const;
1683 
1684  /// Return true if the target supports a memory access of this type for the
1685  /// given MachineMemOperand. If the access is allowed, the optional
1686  /// final parameter returns if the access is also fast (as defined by the
1687  /// target).
1688  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1689  const MachineMemOperand &MMO,
1690  bool *Fast = nullptr) const;
1691 
1692  /// LLT handling variant.
1693  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1694  const MachineMemOperand &MMO,
1695  bool *Fast = nullptr) const;
1696 
1697  /// Returns the target specific optimal type for load and store operations as
1698  /// a result of memset, memcpy, and memmove lowering.
1699  /// It returns EVT::Other if the type should be determined using generic
1700  /// target-independent logic.
1701  virtual EVT
1702  getOptimalMemOpType(const MemOp &Op,
1703  const AttributeList & /*FuncAttributes*/) const {
1704  return MVT::Other;
1705  }
1706 
1707  /// LLT returning variant.
1708  virtual LLT
1709  getOptimalMemOpLLT(const MemOp &Op,
1710  const AttributeList & /*FuncAttributes*/) const {
1711  return LLT();
1712  }
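// A minimal sketch of an override (hypothetical policy, illustrative names):
// prefer 128-bit vector ops for large expansions unless the function forbids
// implicit floating-point/vector use.
//
//   EVT MyTargetLowering::getOptimalMemOpType(
//       const MemOp &Op, const AttributeList &FuncAttributes) const {
//     if (Op.size() >= 16 &&
//         !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat))
//       return MVT::v4i32;
//     return MVT::Other; // defer to generic logic
//   }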
1713 
1714  /// Returns true if it's safe to use load / store of the specified type to
1715  /// expand memcpy / memset inline.
1716  ///
1717  /// This is mostly true for all types except for some special cases. For
1718  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1719  /// fstpl which also does type conversion. Note the specified type doesn't
1720  /// have to be legal as the hook is used before type legalization.
1721  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1722 
1723  /// Return lower limit for number of blocks in a jump table.
1724  virtual unsigned getMinimumJumpTableEntries() const;
1725 
1726  /// Return lower limit of the density in a jump table.
1727  unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1728 
1729  /// Return upper limit for number of entries in a jump table.
1730  /// Zero if no limit.
1731  unsigned getMaximumJumpTableSize() const;
1732 
1733  virtual bool isJumpTableRelative() const;
1734 
1735  /// If a physical register, this specifies the register that
1736  /// llvm.savestack/llvm.restorestack should save and restore.
1737  Register getStackPointerRegisterToSaveRestore() const {
1738  return StackPointerRegisterToSaveRestore;
1739  }
1740 
1741  /// If a physical register, this returns the register that receives the
1742  /// exception address on entry to an EH pad.
1743  virtual Register
1744  getExceptionPointerRegister(const Constant *PersonalityFn) const {
1745  return Register();
1746  }
1747 
1748  /// If a physical register, this returns the register that receives the
1749  /// exception typeid on entry to a landing pad.
1750  virtual Register
1751  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1752  return Register();
1753  }
1754 
1755  virtual bool needsFixedCatchObjects() const {
1756  report_fatal_error("Funclet EH is not implemented for this target");
1757  }
1758 
1759  /// Return the minimum stack alignment of an argument.
1760  Align getMinStackArgumentAlignment() const {
1761  return MinStackArgumentAlignment;
1762  }
1763 
1764  /// Return the minimum function alignment.
1765  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1766 
1767  /// Return the preferred function alignment.
1768  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1769 
1770  /// Return the preferred loop alignment.
1771  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1772  return PrefLoopAlignment;
1773  }
1774 
1775  /// Should loops be aligned even when the function is marked OptSize (but not
1776  /// MinSize).
1777  virtual bool alignLoopsWithOptSize() const {
1778  return false;
1779  }
1780 
1781  /// If the target has a standard location for the stack protector guard,
1782  /// returns the address of that location. Otherwise, returns nullptr.
1783  /// DEPRECATED: please override useLoadStackGuardNode and customize
1784  /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1785  virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
1786 
1787  /// Inserts necessary declarations for SSP (stack protection) purpose.
1788  /// Should be used only when getIRStackGuard returns nullptr.
1789  virtual void insertSSPDeclarations(Module &M) const;
1790 
1791  /// Return the variable that's previously inserted by insertSSPDeclarations,
1792  /// if any, otherwise return nullptr. Should be used only when
1793  /// getIRStackGuard returns nullptr.
1794  virtual Value *getSDagStackGuard(const Module &M) const;
1795 
1796  /// If this function returns true, stack protection checks should XOR the
1797  /// frame pointer (or whichever pointer is used to address locals) into the
1798  /// stack guard value before checking it. getIRStackGuard must return nullptr
1799  /// if this returns true.
1800  virtual bool useStackGuardXorFP() const { return false; }
1801 
1802  /// If the target has a standard stack protection check function that
1803  /// performs validation and error handling, returns the function. Otherwise,
1804  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1805  /// Should be used only when getIRStackGuard returns nullptr.
1806  virtual Function *getSSPStackGuardCheck(const Module &M) const;
1807 
1808  /// \returns true if a constant G_UBFX is legal on the target.
1809  virtual bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1,
1810  LLT Ty2) const {
1811  return false;
1812  }
1813 
1814 protected:
1815  Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1816  bool UseTLS) const;
1817 
1818 public:
1819  /// Returns the target-specific address of the unsafe stack pointer.
1820  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
1821 
1822  /// Returns the name of the symbol used to emit stack probes or the empty
1823  /// string if not applicable.
1824  virtual bool hasStackProbeSymbol(MachineFunction &MF) const { return false; }
1825 
1826  virtual bool hasInlineStackProbe(MachineFunction &MF) const { return false; }
1827 
1828  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
1829  return "";
1830  }
1831 
1832  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1833  /// are happy to sink it into basic blocks. A cast may be free, but not
1834  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
1835  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
1836 
1837  /// Return true if the pointer arguments to CI should be aligned by aligning
1838  /// the object whose address is being passed. If so then MinSize is set to the
1839  /// minimum size the object must be to be aligned and PrefAlign is set to the
1840  /// preferred alignment.
1841  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1842  unsigned & /*PrefAlign*/) const {
1843  return false;
1844  }
1845 
1846  //===--------------------------------------------------------------------===//
1847  /// \name Helpers for TargetTransformInfo implementations
1848  /// @{
1849 
1850  /// Get the ISD node that corresponds to the Instruction class opcode.
1851  int InstructionOpcodeToISD(unsigned Opcode) const;
1852 
1853  /// Estimate the cost of type-legalization and the legalized type.
1854  std::pair<InstructionCost, MVT> getTypeLegalizationCost(const DataLayout &DL,
1855  Type *Ty) const;
1856 
1857  /// @}
1858 
1859  //===--------------------------------------------------------------------===//
1860  /// \name Helpers for atomic expansion.
1861  /// @{
1862 
1863  /// Returns the maximum atomic operation size (in bits) supported by
1864  /// the backend. Atomic operations greater than this size (as well
1865  /// as ones that are not naturally aligned), will be expanded by
1866  /// AtomicExpandPass into an __atomic_* library call.
1867  unsigned getMaxAtomicSizeInBitsSupported() const {
1868  return MaxAtomicSizeInBitsSupported;
1869  }
1870 
1871  /// Returns the size of the smallest cmpxchg or ll/sc instruction
1872  /// the backend supports. Any smaller operations are widened in
1873  /// AtomicExpandPass.
1874  ///
1875  /// Note that *unlike* operations above the maximum size, atomic ops
1876  /// are still natively supported below the minimum; they just
1877  /// require a more complex expansion.
1878  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1879 
1880  /// Whether the target supports unaligned atomic operations.
1881  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1882 
1883  /// Whether AtomicExpandPass should automatically insert fences and reduce
1884  /// ordering for this atomic. This should be true for most architectures with
1885  /// weak memory ordering. Defaults to false.
1886  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1887  return false;
1888  }
1889 
1890  /// Perform a load-linked operation on Addr, returning a "Value *" with the
1891  /// corresponding pointee type. This may entail some non-trivial operations to
1892  /// truncate or reconstruct types that will be illegal in the backend. See
1893  /// ARMISelLowering for an example implementation.
1894  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
1895  Value *Addr, AtomicOrdering Ord) const {
1896  llvm_unreachable("Load linked unimplemented on this target");
1897  }
1898 
1899  /// Perform a store-conditional operation to Addr. Return the status of the
1900  /// store. This should be 0 if the store succeeded, non-zero otherwise.
1901  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
1902  Value *Addr, AtomicOrdering Ord) const {
1903  llvm_unreachable("Store conditional unimplemented on this target");
1904  }
1905 
1906  /// Perform a masked atomicrmw using a target-specific intrinsic. This
1907  /// represents the core LL/SC loop which will be lowered at a late stage by
1908  /// the backend.
1909  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
1910  AtomicRMWInst *AI,
1911  Value *AlignedAddr, Value *Incr,
1912  Value *Mask, Value *ShiftAmt,
1913  AtomicOrdering Ord) const {
1914  llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
1915  }
1916 
1917  /// Perform a masked cmpxchg using a target-specific intrinsic. This
1918  /// represents the core LL/SC loop which will be lowered at a late stage by
1919  /// the backend.
1920  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
1921  IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
1922  Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
1923  llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
1924  }
1925 
1926  /// Inserts in the IR a target-specific intrinsic specifying a fence.
1927  /// It is called by AtomicExpandPass before expanding an
1928  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1929  /// if shouldInsertFencesForAtomic returns true.
1930  ///
1931  /// Inst is the original atomic instruction, prior to other expansions that
1932  /// may be performed.
1933  ///
1934  /// This function should either return a nullptr, or a pointer to an IR-level
1935  /// Instruction*. Even complex fence sequences can be represented by a
1936  /// single Instruction* through an intrinsic to be lowered later.
1937  /// Backends should override this method to produce target-specific intrinsic
1938  /// for their fences.
1939  /// FIXME: Please note that the default implementation here in terms of
1940  /// IR-level fences exists for historical/compatibility reasons and is
1941  /// *unsound* ! Fences cannot, in general, be used to restore sequential
1942  /// consistency. For example, consider the following example:
1943  /// atomic<int> x = y = 0;
1944  /// int r1, r2, r3, r4;
1945  /// Thread 0:
1946  /// x.store(1);
1947  /// Thread 1:
1948  /// y.store(1);
1949  /// Thread 2:
1950  /// r1 = x.load();
1951  /// r2 = y.load();
1952  /// Thread 3:
1953  /// r3 = y.load();
1954  /// r4 = x.load();
1955  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1956  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1957  /// IR-level fences can prevent it.
1958  /// @{
1959  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
1960  Instruction *Inst,
1961  AtomicOrdering Ord) const;
1962 
1963  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
1964  Instruction *Inst,
1965  AtomicOrdering Ord) const;
1966  /// @}
1967 
1968  // Emits code that executes when the comparison result in the ll/sc
1969  // expansion of a cmpxchg instruction is such that the store-conditional will
1970  // not execute. This makes it possible to balance out the load-linked with
1971  // a dedicated instruction, if desired.
1972  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1973  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1974  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
1975 
1976  /// Returns true if the given (atomic) store should be expanded by the
1977  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1978  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1979  return false;
1980  }
1981 
1982  /// Returns true if arguments should be sign-extended in lib calls.
1983  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1984  return IsSigned;
1985  }
1986 
1987  /// Returns true if arguments should be extended in lib calls.
1988  virtual bool shouldExtendTypeInLibCall(EVT Type) const {
1989  return true;
1990  }
1991 
1992  /// Returns how the given (atomic) load should be expanded by the
1993  /// IR-level AtomicExpand pass.
1994  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1995  return AtomicExpansionKind::None;
1996  }
1997 
1998  /// Returns how the given atomic cmpxchg should be expanded by the IR-level
1999  /// AtomicExpand pass.
2000  virtual AtomicExpansionKind
2001  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2002  return AtomicExpansionKind::None;
2003  }
2004 
2005  /// Returns how the IR-level AtomicExpand pass should expand the given
2006  /// AtomicRMW, if at all. Default is to never expand.
2007  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2008  return RMW->isFloatingPointOperation() ?
2009  AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2010  }
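// A minimal sketch of an override, assuming a hypothetical LL/SC target
// whose narrowest native atomic is 32 bits: sub-word RMWs go through the
// masked-intrinsic path (emitMaskedAtomicRMWIntrinsic above).
//
//   TargetLowering::AtomicExpansionKind
//   MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
//     if (AI->getType()->getPrimitiveSizeInBits() < 32)
//       return AtomicExpansionKind::MaskedIntrinsic;
//     return AtomicExpansionKind::None;
//   }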
2011 
2012  /// On some platforms, an AtomicRMW that never actually modifies the value
2013  /// (such as fetch_add of 0) can be turned into a fence followed by an
2014  /// atomic load. This may sound useless, but it makes it possible for the
2015  /// processor to keep the cacheline shared, dramatically improving
2016  /// performance. And such idempotent RMWs are useful for implementing some
2017  /// kinds of locks, see for example (justification + benchmarks):
2018  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2019  /// This method tries doing that transformation, returning the atomic load if
2020  /// it succeeds, and nullptr otherwise.
2021  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2022  /// another round of expansion.
2023  virtual LoadInst *
2024  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2025  return nullptr;
2026  }
2027 
2028  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2029  /// SIGN_EXTEND, or ANY_EXTEND).
2030  virtual ISD::NodeType getExtendForAtomicOps() const {
2031  return ISD::ZERO_EXTEND;
2032  }
2033 
2034  /// Returns how the platform's atomic compare and swap expects its comparison
2035  /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2036  /// separate from getExtendForAtomicOps, which is concerned with the
2037  /// sign-extension of the instruction's output, whereas here we are concerned
2038  /// with the sign-extension of the input. For targets with compare-and-swap
2039  /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2040  /// the input can be ANY_EXTEND, but the output will still have a specific
2041  /// extension.
2042  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2043  return ISD::ANY_EXTEND;
2044  }
2045 
2046  /// @}
2047 
2048  /// Returns true if we should normalize
2049  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2050  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2051  /// that it saves us from materializing N0 and N1 in an integer register.
2052  /// Targets that are able to perform and/or on flags should return false here.
2053  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2054  EVT VT) const {
2055  // If a target has multiple condition registers, then it likely has logical
2056  // operations on those registers.
2057  if (hasMultipleConditionRegisters())
2058  return false;
2059  // Only do the transform if the value won't be split into multiple
2060  // registers.
2061  LegalizeTypeAction Action = getTypeAction(Context, VT);
2062  return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2063  Action != TypeSplitVector;
2064  }
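// Concretely, the normalization above rewrites (IR sketch):
//   %c = and i1 %c0, %c1
//   %r = select i1 %c, i32 %x, i32 %y
// into
//   %inner = select i1 %c1, i32 %x, i32 %y
//   %r     = select i1 %c0, i32 %inner, i32 %y
// so %c0 and %c1 never have to be materialized for an integer 'and'.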
2065 
2066  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2067 
2068  /// Return true if a select of constants (select Cond, C1, C2) should be
2069  /// transformed into simple math ops with the condition value. For example:
2070  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2071  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2072  return false;
2073  }
2074 
2075  /// Return true if it is profitable to transform an integer
2076  /// multiplication-by-constant into simpler operations like shifts and adds.
2077  /// This may be true if the target does not directly support the
2078  /// multiplication operation for the specified type or the sequence of simpler
2079  /// ops is faster than the multiply.
2080  virtual bool decomposeMulByConstant(LLVMContext &Context,
2081  EVT VT, SDValue C) const {
2082  return false;
2083  }
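// For example, a target returning true here for C = 9 permits the combiner
// to rewrite (mul x, 9) --> (add (shl x, 3), x), trading the multiply for a
// shift and an add.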
2084 
2085  /// Return true if it is more correct/profitable to use strict FP_TO_INT
2086  /// conversion operations - canonicalizing the FP source value instead of
2087  /// converting all cases and then selecting based on value.
2088  /// This may be true if the target throws exceptions for out of bounds
2089  /// conversions or has fast FP CMOV.
2090  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2091  bool IsSigned) const {
2092  return false;
2093  }
2094 
2095  //===--------------------------------------------------------------------===//
2096  // TargetLowering Configuration Methods - These methods should be invoked by
2097  // the derived class constructor to configure this object for the target.
2098  //
2099 protected:
2100  /// Specify how the target extends the result of integer and floating point
2101  /// boolean values from i1 to a wider type. See getBooleanContents.
2102  void setBooleanContents(BooleanContent Ty) {
2103  BooleanContents = Ty;
2104  BooleanFloatContents = Ty;
2105  }
2106 
2107  /// Specify how the target extends the result of integer and floating point
2108  /// boolean values from i1 to a wider type. See getBooleanContents.
2109  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2110  BooleanContents = IntTy;
2111  BooleanFloatContents = FloatTy;
2112  }
2113 
2114  /// Specify how the target extends the result of a vector boolean value from a
2115  /// vector of i1 to a wider type. See getBooleanContents.
2116  void setBooleanVectorContents(BooleanContent Ty) {
2117  BooleanVectorContents = Ty;
2118  }
2119 
2120  /// Specify the target scheduling preference.
2121  void setSchedulingPreference(Sched::Preference Pref) {
2122  SchedPreferenceInfo = Pref;
2123  }
2124 
2125  /// Indicate the minimum number of blocks to generate jump tables.
2126  void setMinimumJumpTableEntries(unsigned Val);
2127 
2128  /// Indicate the maximum number of entries in jump tables.
2129  /// Set to zero to generate unlimited jump tables.
2130  void setMaximumJumpTableSize(unsigned);
2131 
2132  /// If set to a physical register, this specifies the register that
2133  /// llvm.savestack/llvm.restorestack should save and restore.
2134  void setStackPointerRegisterToSaveRestore(Register R) {
2135  StackPointerRegisterToSaveRestore = R;
2136  }
2137 
2138  /// Tells the code generator that the target has multiple (allocatable)
2139  /// condition registers that can be used to store the results of comparisons
2140  /// for use by selects and conditional branches. With multiple condition
2141  /// registers, the code generator will not aggressively sink comparisons into
2142  /// the blocks of their users.
2143  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2144  HasMultipleConditionRegisters = hasManyRegs;
2145  }
2146 
2147  /// Tells the code generator that the target has BitExtract instructions.
2148  /// The code generator will aggressively sink "shift"s into the blocks of
2149  /// their users if the users will generate "and" instructions which can be
2150  /// combined with "shift" to BitExtract instructions.
2151  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2152  HasExtractBitsInsn = hasExtractInsn;
2153  }
2154 
2155  /// Tells the code generator not to expand logic operations on comparison
2156  /// predicates into separate sequences that increase the amount of flow
2157  /// control.
2158  void setJumpIsExpensive(bool isExpensive = true);
2159 
2160  /// Tells the code generator which bitwidths to bypass.
2161  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2162  BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2163  }
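// Usage sketch; the 64/32 pairing mirrors what X86 uses for CPUs with slow
// 64-bit division (treat the exact widths as illustrative here):
//
//   // Emit a 32-bit div/rem plus a runtime range check instead of a full
//   // 64-bit divide when both operands fit in 32 bits.
//   addBypassSlowDiv(64, 32);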
2164 
2165  /// Add the specified register class as an available regclass for the
2166  /// specified value type. This indicates the selector can handle values of
2167  /// that class natively.
2168  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2169  assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
2170  RegClassForVT[VT.SimpleTy] = RC;
2171  }
2172 
2173  /// Return the largest legal super-reg register class of the register class
2174  /// for the specified type and its associated "cost".
2175  virtual std::pair<const TargetRegisterClass *, uint8_t>
2176  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2177 
2178  /// Once all of the register classes are added, this allows us to compute
2179  /// derived properties we expose.
2180  virtual void computeRegisterProperties(const TargetRegisterInfo *TRI);
2181 
2182  /// Indicate that the specified operation does not work with the specified
2183  /// type and indicate what to do about it. Note that VT may refer to either
2184  /// the type of a result or that of an operand of Op.
2185  void setOperationAction(unsigned Op, MVT VT,
2186  LegalizeAction Action) {
2187  assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
2188  OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2189  }
2190 
2191  /// Indicate that the specified load with extension does not work with the
2192  /// specified type and indicate what to do about it.
2193  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2194  LegalizeAction Action) {
2195  assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2196  MemVT.isValid() && "Table isn't big enough!");
2197  assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2198  unsigned Shift = 4 * ExtType;
2199  LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2200  LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2201  }
2202 
2203  /// Indicate that the specified truncating store does not work with the
2204  /// specified type and indicate what to do about it.
2205  void setTruncStoreAction(MVT ValVT, MVT MemVT,
2206  LegalizeAction Action) {
2207  assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2208  TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2209  }
2210 
2211  /// Indicate that the specified indexed load does or does not work with the
2212  /// specified type and indicate what to do about it.
2213  ///
2214  /// NOTE: All indexed mode loads are initialized to Expand in
2215  /// TargetLowering.cpp
2216  void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2217  setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2218  }
2219 
2220  /// Indicate that the specified indexed store does or does not work with the
2221  /// specified type and indicate what to do about it.
2222  ///
2223  /// NOTE: All indexed mode stores are initialized to Expand in
2224  /// TargetLowering.cpp
2225  void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2226  setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2227  }
2228 
2229  /// Indicate that the specified indexed masked load does or does not work with
2230  /// the specified type and indicate what to do about it.
2231  ///
2232  /// NOTE: All indexed mode masked loads are initialized to Expand in
2233  /// TargetLowering.cpp
2234  void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2235  LegalizeAction Action) {
2236  setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2237  }
2238 
2239  /// Indicate that the specified indexed masked store does or does not work
2240  /// with the specified type and indicate what to do about it.
2241  ///
2242  /// NOTE: All indexed mode masked stores are initialized to Expand in
2243  /// TargetLowering.cpp
2244  void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2245  LegalizeAction Action) {
2246  setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2247  }
2248 
2249  /// Indicate that the specified condition code is or isn't supported on the
2250  /// target and indicate what to do about it.
2251  void setCondCodeAction(ISD::CondCode CC, MVT VT,
2252  LegalizeAction Action) {
2253  assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
2254  "Table isn't big enough!");
2255  assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2256  /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the 32-bit
2257  /// value and the upper 29 bits index into the second dimension of the array
2258  /// to select what 32-bit value to use.
2259  uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2260  CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2261  CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2262  }
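// Worked example of the packing above: for VT.SimpleTy == 13,
//   Shift = 4 * (13 & 0x7) = 20 and 13 >> 3 == 1,
// so the 4-bit action occupies bits [23:20] of CondCodeActions[CC][1].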
2263 
2264  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2265  /// to trying a larger integer/fp until it can find one that works. If that
2266  /// default is insufficient, this method can be used by the target to override
2267  /// the default.
2268  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2269  PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2270  }
2271 
2272  /// Convenience method to set an operation to Promote and specify the type
2273  /// in a single call.
2274  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2275  setOperationAction(Opc, OrigVT, Promote);
2276  AddPromotedToType(Opc, OrigVT, DestVT);
2277  }
2278 
2279  /// Targets should invoke this method for each target independent node that
2280  /// they want to provide a custom DAG combiner for by implementing the
2281  /// PerformDAGCombine virtual method.
2282  void setTargetDAGCombine(ISD::NodeType NT) {
2283  assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
2284  TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
2285  }
2286 
2287  /// Set the target's minimum function alignment.
2288  void setMinFunctionAlignment(Align Alignment) {
2289  MinFunctionAlignment = Alignment;
2290  }
2291 
2292  /// Set the target's preferred function alignment. This should be set if
2293  /// there is a performance benefit to higher-than-minimum alignment.
2294  void setPrefFunctionAlignment(Align Alignment) {
2295  PrefFunctionAlignment = Alignment;
2296  }
2297 
2298  /// Set the target's preferred loop alignment. Default alignment is one, it
2299  /// means the target does not care about loop alignment. The target may also
2300  /// override getPrefLoopAlignment to provide per-loop values.
2301  void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2302 
2303  /// Set the minimum stack alignment of an argument.
2304  void setMinStackArgumentAlignment(Align Alignment) {
2305  MinStackArgumentAlignment = Alignment;
2306  }
2307 
2308  /// Set the maximum atomic operation size supported by the
2309  /// backend. Atomic operations greater than this size (as well as
2310  /// ones that are not naturally aligned), will be expanded by
2311  /// AtomicExpandPass into an __atomic_* library call.
2312  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2313  MaxAtomicSizeInBitsSupported = SizeInBits;
2314  }
2315 
2316  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2317  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2318  MinCmpXchgSizeInBits = SizeInBits;
2319  }
2320 
2321  /// Sets whether unaligned atomic operations are supported.
2322  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2323  SupportsUnalignedAtomics = UnalignedSupported;
2324  }
2325 
2326 public:
2327  //===--------------------------------------------------------------------===//
2328  // Addressing mode description hooks (used by LSR etc).
2329  //
2330 
2331  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2332  /// instructions reading the address. This allows as much computation as
2333  /// possible to be done in the address mode for that operand. This hook lets
2334  /// targets also pass back when this should be done on intrinsics which
2335  /// load/store.
2336  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2337  SmallVectorImpl<Value*> &/*Ops*/,
2338  Type *&/*AccessTy*/) const {
2339  return false;
2340  }
2341 
2342  /// This represents an addressing mode of:
2343  /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2344  /// If BaseGV is null, there is no BaseGV.
2345  /// If BaseOffs is zero, there is no base offset.
2346  /// If HasBaseReg is false, there is no base register.
2347  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2348  /// no scale.
2349  struct AddrMode {
2350  GlobalValue *BaseGV = nullptr;
2351  int64_t BaseOffs = 0;
2352  bool HasBaseReg = false;
2353  int64_t Scale = 0;
2354  AddrMode() = default;
2355  };
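// For instance, an x86 address such as [%rsi + 4*%rdi + 20] decomposes as
// (register assignment illustrative):
//   AddrMode AM;
//   AM.BaseGV = nullptr;  // no global symbol in the address
//   AM.BaseOffs = 20;     // constant displacement
//   AM.HasBaseReg = true; // %rsi
//   AM.Scale = 4;         // scaled index 4*%rdi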
2356 
2357  /// Return true if the addressing mode represented by AM is legal for this
2358  /// target, for a load/store of the specified type.
2359  ///
2360  /// The type may be VoidTy, in which case only return true if the addressing
2361  /// mode is legal for a load/store of any legal type. TODO: Handle
2362  /// pre/postinc as well.
2363  ///
2364  /// If the address space cannot be determined, it will be -1.
2365  ///
2366  /// TODO: Remove default argument
2367  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2368  Type *Ty, unsigned AddrSpace,
2369  Instruction *I = nullptr) const;
2370 
2371  /// Return the cost of the scaling factor used in the addressing mode
2372  /// represented by AM for this target, for a load/store of the specified type.
2373  ///
2374  /// If the AM is supported, the return value must be >= 0.
2375  /// If the AM is not supported, it returns a negative value.
2376  /// TODO: Handle pre/postinc as well.
2377  /// TODO: Remove default argument
2378  virtual InstructionCost getScalingFactorCost(const DataLayout &DL,
2379  const AddrMode &AM, Type *Ty,
2380  unsigned AS = 0) const {
2381  // Default: assume that any scaling factor used in a legal AM is free.
2382  if (isLegalAddressingMode(DL, AM, Ty, AS))
2383  return 0;
2384  return -1;
2385  }
2386 
2387  /// Return true if the specified immediate is legal icmp immediate, that is
2388  /// the target has icmp instructions which can compare a register against the
2389  /// immediate without having to materialize the immediate into a register.
2390  virtual bool isLegalICmpImmediate(int64_t) const {
2391  return true;
2392  }
2393 
2394  /// Return true if the specified immediate is legal add immediate, that is the
2395  /// target has add instructions which can add a register with the immediate
2396  /// without having to materialize the immediate into a register.
2397  virtual bool isLegalAddImmediate(int64_t) const {
2398  return true;
2399  }
2400 
2401  /// Return true if the specified immediate is legal for the value input of a
2402  /// store instruction.
2403  virtual bool isLegalStoreImmediate(int64_t Value) const {
2404  // Default implementation assumes that at least 0 works since it is likely
2405  // that a zero register exists or a zero immediate is allowed.
2406  return Value == 0;
2407  }
2408 
2409  /// Return true if it's significantly cheaper to shift a vector by a uniform
2410  /// scalar than by an amount which will vary across each lane. On x86 before
2411  /// AVX2 for example, there is a "psllw" instruction for the former case, but
2412  /// no simple instruction for a general "a << b" operation on vectors.
2413  /// This should also apply to lowering for vector funnel shifts (rotates).
2414  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2415  return false;
2416  }
2417 
2418  /// Given a shuffle vector SVI representing a vector splat, return a new
2419  /// scalar type of size equal to SVI's scalar type if the new type is more
2420  /// profitable. Returns nullptr otherwise. For example under MVE float splats
2421  /// are converted to integer to prevent the need to move from SPR to GPR
2422  /// registers.
2423  virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
2424  return nullptr;
2425  }
2426 
2427  /// Given a set in interconnected phis of type 'From' that are loaded/stored
2428  /// or bitcast to type 'To', return true if the set should be converted to
2429  /// 'To'.
2430  virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2431  return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2432  (To->isIntegerTy() || To->isFloatingPointTy());
2433  }
2434 
2435  /// Returns true if the opcode is a commutative binary operation.
2436  virtual bool isCommutativeBinOp(unsigned Opcode) const {
2437  // FIXME: This should get its info from the td file.
2438  switch (Opcode) {
2439  case ISD::ADD:
2440  case ISD::SMIN:
2441  case ISD::SMAX:
2442  case ISD::UMIN:
2443  case ISD::UMAX:
2444  case ISD::MUL:
2445  case ISD::MULHU:
2446  case ISD::MULHS:
2447  case ISD::SMUL_LOHI:
2448  case ISD::UMUL_LOHI:
2449  case ISD::FADD:
2450  case ISD::FMUL:
2451  case ISD::AND:
2452  case ISD::OR:
2453  case ISD::XOR:
2454  case ISD::SADDO:
2455  case ISD::UADDO:
2456  case ISD::ADDC:
2457  case ISD::ADDE:
2458  case ISD::SADDSAT:
2459  case ISD::UADDSAT:
2460  case ISD::FMINNUM:
2461  case ISD::FMAXNUM:
2462  case ISD::FMINNUM_IEEE:
2463  case ISD::FMAXNUM_IEEE:
2464  case ISD::FMINIMUM:
2465  case ISD::FMAXIMUM:
2466  return true;
2467  default: return false;
2468  }
2469  }
2470 
2471  /// Return true if the node is a math/logic binary operator.
2472  virtual bool isBinOp(unsigned Opcode) const {
2473  // A commutative binop must be a binop.
2474  if (isCommutativeBinOp(Opcode))
2475  return true;
2476  // These are non-commutative binops.
2477  switch (Opcode) {
2478  case ISD::SUB:
2479  case ISD::SHL:
2480  case ISD::SRL:
2481  case ISD::SRA:
2482  case ISD::SDIV:
2483  case ISD::UDIV:
2484  case ISD::SREM:
2485  case ISD::UREM:
2486  case ISD::SSUBSAT:
2487  case ISD::USUBSAT:
2488  case ISD::FSUB:
2489  case ISD::FDIV:
2490  case ISD::FREM:
2491  return true;
2492  default:
2493  return false;
2494  }
2495  }
2496 
2497  /// Return true if it's free to truncate a value of type FromTy to type
2498  /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
2499  /// by referencing its sub-register AX.
2500  /// Targets must return false when FromTy <= ToTy.
2501  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2502  return false;
2503  }
2504 
2505  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2506  /// whether a call is in tail position. Typically this means that both results
2507  /// would be assigned to the same register or stack slot, but it could mean
2508  /// the target performs adequate checks of its own before proceeding with the
2509  /// tail call. Targets must return false when FromTy <= ToTy.
2510  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2511  return false;
2512  }
2513 
2514  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
2515  return false;
2516  }
2517 
2518  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2519 
2520  /// Return true if the extension represented by \p I is free.
2521  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2522  /// this method can use the context provided by \p I to decide
2523  /// whether or not \p I is free.
2524  /// This method extends the behavior of the is[Z|FP]ExtFree family.
2525  /// In other words, if is[Z|FP]ExtFree returns true, then this method
2526  /// returns true as well. The converse is not true.
2527  /// The target can perform the adequate checks by overriding isExtFreeImpl.
2528  /// \pre \p I must be a sign, zero, or fp extension.
2529  bool isExtFree(const Instruction *I) const {
2530  switch (I->getOpcode()) {
2531  case Instruction::FPExt:
2532  if (isFPExtFree(EVT::getEVT(I->getType()),
2533  EVT::getEVT(I->getOperand(0)->getType())))
2534  return true;
2535  break;
2536  case Instruction::ZExt:
2537  if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2538  return true;
2539  break;
2540  case Instruction::SExt:
2541  break;
2542  default:
2543  llvm_unreachable("Instruction is not an extension");
2544  }
2545  return isExtFreeImpl(I);
2546  }
2547 
2548  /// Return true if \p Load and \p Ext can form an ExtLoad.
2549  /// For example, in AArch64
2550  /// %L = load i8, i8* %ptr
2551  /// %E = zext i8 %L to i32
2552  /// can be lowered into one load instruction
2553  /// ldrb w0, [x0]
2554  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2555  const DataLayout &DL) const {
2556  EVT VT = getValueType(DL, Ext->getType());
2557  EVT LoadVT = getValueType(DL, Load->getType());
2558 
2559  // If the load has other users and the truncate is not free, the ext
2560  // probably isn't free.
2561  if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2562  !isTruncateFree(Ext->getType(), Load->getType()))
2563  return false;
2564 
2565  // Check whether the target supports casts folded into loads.
2566  unsigned LType;
2567  if (isa<ZExtInst>(Ext))
2568  LType = ISD::ZEXTLOAD;
2569  else {
2570  assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2571  LType = ISD::SEXTLOAD;
2572  }
2573 
2574  return isLoadExtLegal(LType, VT, LoadVT);
2575  }
2576 
2577  /// Return true if any actual instruction that defines a value of type FromTy
2578  /// implicitly zero-extends the value to ToTy in the result register.
2579  ///
2580  /// The function should return true when it is likely that the truncate can
2581  /// be freely folded with an instruction defining a value of FromTy. If
2582  /// the defining instruction is unknown (because you're looking at a
2583  /// function argument, PHI, etc.) then the target may require an
2584  /// explicit truncate, which is not necessarily free, but this function
2585  /// does not deal with those cases.
2586  /// Targets must return false when FromTy >= ToTy.
2587  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2588  return false;
2589  }
2590 
2591  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
2592  return false;
2593  }
2594 
2595  /// Return true if sign-extension from FromTy to ToTy is cheaper than
2596  /// zero-extension.
2597  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2598  return false;
2599  }
2600 
2601  /// Return true if sinking I's operands to the same basic block as I is
2602  /// profitable, e.g. because the operands can be folded into a target
2603  /// instruction during instruction selection. After calling the function
2604  /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2605  /// come first).
2606  virtual bool shouldSinkOperands(Instruction *I,
2607  SmallVectorImpl<Use *> &Ops) const {
2608  return false;
2609  }
2610 
2611  /// Return true if the target supplies and combines to a paired load
2612  /// two loaded values of type LoadedType next to each other in memory.
2613  /// RequiredAlignment gives the minimal alignment constraints that must be met
2614  /// to be able to select this paired load.
2615  ///
2616  /// This information is *not* used to generate actual paired loads, but it is
2617  /// used to generate a sequence of loads that is easier to combine into a
2618  /// paired load.
2619  /// For instance, something like this:
2620  /// a = load i64* addr
2621  /// b = trunc i64 a to i32
2622  /// c = lshr i64 a, 32
2623  /// d = trunc i64 c to i32
2624  /// will be optimized into:
2625  /// b = load i32* addr1
2626  /// d = load i32* addr2
2627  /// Where addr1 = addr2 +/- sizeof(i32).
2628  ///
2629  /// In other words, unless the target performs a post-isel load combining,
2630  /// this information should not be provided because it will generate more
2631  /// loads.
2632  virtual bool hasPairedLoad(EVT /*LoadedType*/,
2633  Align & /*RequiredAlignment*/) const {
2634  return false;
2635  }
2636 
2637  /// Return true if the target has a vector blend instruction.
2638  virtual bool hasVectorBlend() const { return false; }
2639 
2640  /// Get the maximum supported factor for interleaved memory accesses.
2641  /// Default to be the minimum interleave factor: 2.
2642  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2643 
2644  /// Lower an interleaved load to target specific intrinsics. Return
2645  /// true on success.
2646  ///
2647  /// \p LI is the vector load instruction.
2648  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2649  /// \p Indices is the corresponding indices for each shufflevector.
2650  /// \p Factor is the interleave factor.
2651  virtual bool lowerInterleavedLoad(LoadInst *LI,
2652  ArrayRef<ShuffleVectorInst *> Shuffles,
2653  ArrayRef<unsigned> Indices,
2654  unsigned Factor) const {
2655  return false;
2656  }
2657 
2658  /// Lower an interleaved store to target specific intrinsics. Return
2659  /// true on success.
2660  ///
2661  /// \p SI is the vector store instruction.
2662  /// \p SVI is the shufflevector to RE-interleave the stored vector.
2663  /// \p Factor is the interleave factor.
2664  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2665  unsigned Factor) const {
2666  return false;
2667  }
2668 
2669  /// Return true if zero-extending the specific node Val to type VT2 is free
2670  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2671  /// because it's folded such as X86 zero-extending loads).
2672  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2673  return isZExtFree(Val.getValueType(), VT2);
2674  }
2675 
2676  /// Return true if an fpext operation is free (for instance, because
2677  /// single-precision floating-point numbers are implicitly extended to
2678  /// double-precision).
2679  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2680  assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2681  "invalid fpext types");
2682  return false;
2683  }
2684 
2685  /// Return true if an fpext operation input to an \p Opcode operation is free
2686  /// (for instance, because half-precision floating-point numbers are
2687  /// implicitly extended to float-precision) for an FMA instruction.
2688  virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2689  EVT DestVT, EVT SrcVT) const {
2690  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2691  "invalid fpext types");
2692  return isFPExtFree(DestVT, SrcVT);
2693  }
2694 
2695  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2696  /// extend node) is profitable.
2697  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2698 
2699  /// Return true if an fneg operation is free to the point where it is never
2700  /// worthwhile to replace it with a bitwise operation.
2701  virtual bool isFNegFree(EVT VT) const {
2702  assert(VT.isFloatingPoint());
2703  return false;
2704  }
2705 
2706  /// Return true if an fabs operation is free to the point where it is never
2707  /// worthwhile to replace it with a bitwise operation.
2708  virtual bool isFAbsFree(EVT VT) const {
2709  assert(VT.isFloatingPoint());
2710  return false;
2711  }
2712 
2713  /// Return true if an FMA operation is faster than a pair of fmul and fadd
2714  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2715  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2716  ///
2717  /// NOTE: This may be called before legalization on types for which FMAs are
2718  /// not legal, but should return true if those types will eventually legalize
2719  /// to types that support FMAs. After legalization, it will only be called on
2720  /// types that support FMAs (via Legal or Custom actions)
2721  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2722  EVT) const {
2723  return false;
2724  }
2725 
2726  /// IR version
2727  virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
2728  return false;
2729  }
2730 
2731  /// Returns true if \p N can be combined with another node to form an
2732  /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
2733  /// will be distributed into an fadd/fsub.
2734  virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
2735  assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
2736  N->getOpcode() == ISD::FMUL) &&
2737  "unexpected node in FMAD forming combine");
2738  return isOperationLegal(ISD::FMAD, N->getValueType(0));
2739  }
2740 
2741  // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather
2742  // than FMUL and ADD is delegated to the machine combiner.
2743  virtual bool generateFMAsInMachineCombiner(EVT VT,
2744  CodeGenOpt::Level OptLevel) const {
2745  return false;
2746  }
2747 
2748  /// Return true if it's profitable to narrow operations of type VT1 to
2749  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2750  /// i32 to i16.
2751  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2752  return false;
2753  }
2754 
2755  /// Return true if it is beneficial to convert a load of a constant to
2756  /// just the constant itself.
2757  /// On some targets it might be more efficient to use a combination of
2758  /// arithmetic instructions to materialize the constant instead of loading it
2759  /// from a constant pool.
2760  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2761  Type *Ty) const {
2762  return false;
2763  }
2764 
2765  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2766  /// from this source type with this index. This is needed because
2767  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2768  /// the first element, and only the target knows which lowering is cheap.
2769  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2770  unsigned Index) const {
2771  return false;
2772  }
2773 
2774  /// Try to convert an extract element of a vector binary operation into an
2775  /// extract element followed by a scalar operation.
2776  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
2777  return false;
2778  }
2779 
2780  /// Return true if extraction of a scalar element from the given vector type
2781  /// at the given index is cheap. For example, if scalar operations occur on
2782  /// the same register file as vector operations, then an extract element may
2783  /// be a sub-register rename rather than an actual instruction.
2784  virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
2785  return false;
2786  }
2787 
2788  /// Try to convert math with an overflow comparison into the corresponding DAG
2789  /// node operation. Targets may want to override this independently of whether
2790  /// the operation is legal/custom for the given type because it may obscure
2791  /// matching of other patterns.
2792  virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
2793  bool MathUsed) const {
2794  // TODO: The default logic is inherited from code in CodeGenPrepare.
2795  // The opcode should not make a difference by default?
2796  if (Opcode != ISD::UADDO)
2797  return false;
2798 
2799  // Allow the transform as long as we have an integer type that is not
2800  // obviously illegal and unsupported and if the math result is used
2801  // besides the overflow check. On some targets (e.g. SPARC), it is
2802  // not profitable to form an overflow op if the math result has no
2803  // concrete users.
2804  if (VT.isVector())
2805  return false;
2806  return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
2807  }
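// Concretely, returning true here lets CodeGenPrepare rewrite (IR sketch):
//   %add = add i32 %x, %y
//   %ov  = icmp ult i32 %add, %x
// into a single overflow node:
//   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
// so the sum and the carry flag come from one operation.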
2808 
2809  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2810  // even if the vector itself has multiple uses.
2811  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2812  return false;
2813  }
2814 
2815  // Return true if CodeGenPrepare should consider splitting large offsets of
2816  // GEPs to make the GEP fit into the addressing mode and be sunk into the
2817  // same blocks as its users.
2818  virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2819 
2820  /// Return true if creating a shift of the type by the given
2821  /// amount is not profitable.
2822  virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
2823  return false;
2824  }
2825 
2826  /// Does this target require the clearing of high-order bits in a register
2827  /// passed to the fp16 to fp conversion library function?
2828  virtual bool shouldKeepZExtForFP16Conv() const { return false; }
2829 
2830  //===--------------------------------------------------------------------===//
2831  // Runtime Library hooks
2832  //
2833 
2834  /// Override the default libcall routine name for the specified libcall.
2835  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2836  LibcallRoutineNames[Call] = Name;
2837  }
2838 
2839  /// Get the libcall routine name for the specified libcall.
2840  const char *getLibcallName(RTLIB::Libcall Call) const {
2841  return LibcallRoutineNames[Call];
2842  }
2843 
2844  /// Override the default CondCode to be used to test the result of the
2845  /// comparison libcall against zero.
2846  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2847  CmpLibcallCCs[Call] = CC;
2848  }
2849 
2850  /// Get the CondCode that's to be used to test the result of the comparison
2851  /// libcall against zero.
2852  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2853  return CmpLibcallCCs[Call];
2854  }
2855 
2856  /// Set the CallingConv that should be used for the specified libcall.
2857  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2858  LibcallCallingConvs[Call] = CC;
2859  }
2860 
2861  /// Get the CallingConv that should be used for the specified libcall.
2862  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2863  return LibcallCallingConvs[Call];
2864  }
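 // These hooks are typically used together from a target's TargetLowering
 // constructor, e.g. (a sketch; the routine name is illustrative only):
 //
 //   setLibcallName(RTLIB::SREM_I64, "__mytarget_moddi3");
 //   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);
 //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);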
2865 
2866  /// Execute target specific actions to finalize target lowering.
2867  /// This is used to set extra flags in MachineFrameInformation and to freeze
2868  /// the set of reserved registers.
2869  /// The default implementation just freezes the set of reserved registers.
2870  virtual void finalizeLowering(MachineFunction &MF) const;
2871 
2872  //===----------------------------------------------------------------------===//
2873  // GlobalISel Hooks
2874  //===----------------------------------------------------------------------===//
2875  /// Check whether or not \p MI needs to be moved close to its uses.
2876  virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
2877 
2878 
2879 private:
2880  const TargetMachine &TM;
2881 
2882  /// Tells the code generator that the target has multiple (allocatable)
2883  /// condition registers that can be used to store the results of comparisons
2884  /// for use by selects and conditional branches. With multiple condition
2885  /// registers, the code generator will not aggressively sink comparisons into
2886  /// the blocks of their users.
2887  bool HasMultipleConditionRegisters;
2888 
2889  /// Tells the code generator that the target has BitExtract instructions.
2890  /// The code generator will aggressively sink "shift"s into the blocks of
2891  /// their users if the users will generate "and" instructions which can be
2892  /// combined with "shift" to BitExtract instructions.
2893  bool HasExtractBitsInsn;
2894 
2895  /// Tells the code generator to bypass slow divide or remainder
2896  /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
2897  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2898  /// div/rem when the operands are positive and less than 256.
2899  DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;
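 // Populated via the addBypassSlowDiv() setter; for example, x86 does roughly
 // the following in its TargetLowering constructor (a sketch):
 //
 //   if (Subtarget.hasSlowDivide32())
 //     addBypassSlowDiv(32, 8);   // bypass 32-bit div/rem with 8-bit div/rem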
2900 
2901  /// Tells the code generator that it shouldn't generate extra flow control
2902  /// instructions and should attempt to combine flow control instructions via
2903  /// predication.
2904  bool JumpIsExpensive;
2905 
2906  /// Information about the contents of the high-bits in boolean values held in
2907  /// a type wider than i1. See getBooleanContents.
2908  BooleanContent BooleanContents;
2909 
2910  /// Information about the contents of the high-bits in boolean values held in
2911  /// a type wider than i1. See getBooleanContents.
2912  BooleanContent BooleanFloatContents;
2913 
2914  /// Information about the contents of the high-bits in boolean vector values
2915  /// when the element type is wider than i1. See getBooleanContents.
2916  BooleanContent BooleanVectorContents;
2917 
2918  /// The target scheduling preference: shortest possible total cycles or lowest
2919  /// register usage.
2920  Sched::Preference SchedPreferenceInfo;
2921 
2922  /// The minimum alignment that any argument on the stack needs to have.
2923  Align MinStackArgumentAlignment;
2924 
2925  /// The minimum function alignment (used when optimizing for size, and to
2926  /// prevent explicitly provided alignment from leading to incorrect code).
2927  Align MinFunctionAlignment;
2928 
2929  /// The preferred function alignment (used when alignment unspecified and
2930  /// optimizing for speed).
2931  Align PrefFunctionAlignment;
2932 
2933  /// The preferred loop alignment (in log2, not in bytes).
2934  Align PrefLoopAlignment;
2935 
2936  /// Size in bits of the maximum atomics size the backend supports.
2937  /// Accesses larger than this will be expanded by AtomicExpandPass.
2938  unsigned MaxAtomicSizeInBitsSupported;
2939 
2940  /// Size in bits of the minimum cmpxchg or ll/sc operation the
2941  /// backend supports.
2942  unsigned MinCmpXchgSizeInBits;
2943 
2944  /// This indicates if the target supports unaligned atomic operations.
2945  bool SupportsUnalignedAtomics;
2946 
2947  /// If set to a physical register, this specifies the register that
2948  /// llvm.stacksave/llvm.stackrestore should save and restore.
2949  Register StackPointerRegisterToSaveRestore;
2950 
2951  /// This indicates the default register class to use for each ValueType the
2952  /// target supports natively.
2953  const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
2954  uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
2955  MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
2956 
2957  /// This indicates the "representative" register class to use for each
2958  /// ValueType the target supports natively. This information is used by the
2959  /// scheduler to track register pressure. By default, the representative
2960  /// register class is the largest legal super-reg register class of the
2961  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2962  /// representative class would be GR32.
2963  const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE];
2964 
2965  /// This indicates the "cost" of the "representative" register class for each
2966  /// ValueType. The cost is used by the scheduler to approximate register
2967  /// pressure.
2968  uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
2969 
2970  /// For any value types we are promoting or expanding, this contains the value
2971  /// type that we are changing to. For Expanded types, this contains one step
2972  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2973  /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2974  /// the same type (e.g. i32 -> i32).
2975  MVT TransformToType[MVT::VALUETYPE_SIZE];
2976 
2977  /// For each operation and each value type, keep a LegalizeAction that
2978  /// indicates how instruction selection should deal with the operation. Most
2979  /// operations are Legal (aka, supported natively by the target), but
2980  /// operations that are not should be described. Note that operations on
2981  /// non-legal value types are not described here.
2982  LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
2983 
2984  /// For each load extension type and each value type, keep a LegalizeAction
2985  /// that indicates how instruction selection should deal with a load of a
2986  /// specific value type and extension type. Uses 4 bits to store the action
2987  /// for each of the 4 load ext types.
2988  uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
2989 
2990  /// For each value type pair keep a LegalizeAction that indicates whether a
2991  /// truncating store of a specific value type and truncating type is legal.
2992  LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
2993 
2994  /// For each indexed mode and each value type, keep a quad of LegalizeAction
2995  /// that indicates how instruction selection should deal with the load /
2996  /// store / maskedload / maskedstore.
2997  ///
2998  /// The first dimension is the value_type for the reference. The second
2999  /// dimension represents the various modes for load store.
3000  uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3001 
3002  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3003  /// indicates how instruction selection should deal with the condition code.
3004  ///
3005  /// Because each CC action takes up 4 bits, we need to have the array size be
3006  /// large enough to fit all of the value types. This can be done by rounding
3007  /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3008  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
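 // Worked example of the sizing: each action occupies 4 bits, so one uint32_t
 // holds the actions for 8 value types; with e.g. 160 value types this needs
 // (160 + 7) / 8 = 20 words per condition code.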
3009 
3010  ValueTypeActionImpl ValueTypeActions;
3011 
3012 private:
3013  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
3014 
3015  /// Targets can specify ISD nodes that they would like PerformDAGCombine
3016  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3017  /// array.
3018  unsigned char
3019  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3020 
3021  /// For operations that must be promoted to a specific type, this holds the
3022  /// destination type. This map should be sparse, so don't hold it as an
3023  /// array.
3024  ///
3025  /// Targets add entries to this map with AddPromotedToType(..), clients access
3026  /// this with getTypeToPromoteTo(..).
3027  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3028  PromoteToType;
3029 
3030  /// Stores the name of each libcall.
3031  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
3032 
3033  /// The ISD::CondCode that should be used to test the result of each of the
3034  /// comparison libcall against zero.
3035  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3036 
3037  /// Stores the CallingConv that should be used for each libcall.
3038  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
3039 
3040  /// Set default libcall names and calling conventions.
3041  void InitLibcalls(const Triple &TT);
3042 
3043  /// The bits of IndexedModeActions used to store the legalisation actions.
3044  /// We store the data as | ML | MS | L | S | each taking 4 bits.
3045  enum IndexedModeActionsBits {
3046  IMAB_Store = 0,
3047  IMAB_Load = 4,
3048  IMAB_MaskedStore = 8,
3049  IMAB_MaskedLoad = 12
3050  };
3051 
3052  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3053  LegalizeAction Action) {
3054  assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3055  (unsigned)Action < 0xf && "Table isn't big enough!");
3056  unsigned Ty = (unsigned)VT.SimpleTy;
3057  IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3058  IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3059  }
3060 
3061  LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3062  unsigned Shift) const {
3063  assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3064  "Table isn't big enough!");
3065  unsigned Ty = (unsigned)VT.SimpleTy;
3066  return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3067  }
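 // Illustrative use of the 4-bit fields (| ML | MS | L | S |): setting and
 // reading the pre-indexed load action for i32 looks like:
 //
 //   setIndexedModeAction(ISD::PRE_INC, MVT::i32, IMAB_Load, Legal);
 //   LegalizeAction A =
 //       getIndexedModeAction(ISD::PRE_INC, MVT::i32, IMAB_Load);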
3068 
3069 protected:
3070  /// Return true if the extension represented by \p I is free.
3071  /// \pre \p I is a sign, zero, or fp extension and
3072  /// is[Z|FP]ExtFree of the related types is not true.
3073  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3074 
3075  /// Depth that GatherAllAliases should continue looking for chain
3076  /// dependencies when trying to find a more preferable chain. As an
3077  /// approximation, this should be more than the number of consecutive stores
3078  /// expected to be merged.
3079  unsigned GatherAllAliasesMaxDepth;
3080 
3081  /// \brief Specify maximum number of store instructions per memset call.
3082  ///
3083  /// When lowering \@llvm.memset this field specifies the maximum number of
3084  /// store operations that may be substituted for the call to memset. Targets
3085  /// must set this value based on the cost threshold for that target. Targets
3086  /// should assume that the memset will be done using as many of the largest
3087  /// store operations first, followed by smaller ones, if necessary, per
3088  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3089  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3090  /// store. This only applies to setting a constant array of a constant size.
3091  unsigned MaxStoresPerMemset;
3092  /// Likewise for functions with the OptSize attribute.
3093  unsigned MaxStoresPerMemsetOptSize;
3094 
3095  /// \brief Specify maximum number of store instructions per memcpy call.
3096  ///
3097  /// When lowering \@llvm.memcpy this field specifies the maximum number of
3098  /// store operations that may be substituted for a call to memcpy. Targets
3099  /// must set this value based on the cost threshold for that target. Targets
3100  /// should assume that the memcpy will be done using as many of the largest
3101  /// store operations first, followed by smaller ones, if necessary, per
3102  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3103  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
3104  /// and one 1-byte store. This only applies to copying a constant array of
3105  /// constant size.
3106  unsigned MaxStoresPerMemcpy;
3107  /// Likewise for functions with the OptSize attribute.
3108  unsigned MaxStoresPerMemcpyOptSize;
3109  /// \brief Specify max number of store instructions to glue in inlined memcpy.
3110  ///
3111  /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3112  /// of store instructions to keep together. This helps in pairing and
3113  /// vectorization later on.
3114  unsigned MaxGluedStoresPerMemcpy = 0;
3115 
3116  /// \brief Specify maximum number of load instructions per memcmp call.
3117  ///
3118  /// When lowering \@llvm.memcmp this field specifies the maximum number of
3119  /// pairs of load operations that may be substituted for a call to memcmp.
3120  /// Targets must set this value based on the cost threshold for that target.
3121  /// Targets should assume that the memcmp will be done using as many of the
3122  /// largest load operations first, followed by smaller ones, if necessary, per
3123  /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3124  /// with 32-bit alignment would result in one 4-byte load, one 2-byte load,
3125  /// and one 1-byte load. This only applies to comparing a constant array of
3126  /// constant size.
3127  unsigned MaxLoadsPerMemcmp;
3128  /// Likewise for functions with the OptSize attribute.
3129  unsigned MaxLoadsPerMemcmpOptSize;
3130 
3131  /// \brief Specify maximum number of store instructions per memmove call.
3132  ///
3133  /// When lowering \@llvm.memmove this field specifies the maximum number of
3134  /// store instructions that may be substituted for a call to memmove. Targets
3135  /// must set this value based on the cost threshold for that target. Targets
3136  /// should assume that the memmove will be done using as many of the largest
3137  /// store operations first, followed by smaller ones, if necessary, per
3138  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3139  /// with 8-bit alignment would result in nine 1-byte stores. This only
3140  /// applies to copying a constant array of constant size.
3141  unsigned MaxStoresPerMemmove;
3142  /// Likewise for functions with the OptSize attribute.
3143  unsigned MaxStoresPerMemmoveOptSize;
3144 
3145  /// Tells the code generator that select is more expensive than a branch if
3146  /// the branch is usually predicted right.
3147  bool PredictableSelectIsExpensive;
3148 
3149  /// \see enableExtLdPromotion.
3150  bool EnableExtLdPromotion;
3151 
3152  /// Return true if the value types that can be represented by the specified
3153  /// register class are all legal.
3154  bool isLegalRC(const TargetRegisterInfo &TRI,
3155  const TargetRegisterClass &RC) const;
3156 
3157  /// Replace/modify any TargetFrameIndex operands with a target-dependent
3158  /// sequence of memory operands that is recognized by PrologEpilogInserter.
3159  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3160  MachineBasicBlock *MBB) const;
3161 
3162  bool IsStrictFPEnabled;
3163 };
3164 
3165 /// This class defines information used to lower LLVM code to legal SelectionDAG
3166 /// operators that the target instruction selector can accept natively.
3167 ///
3168 /// This class also defines callbacks that targets must implement to lower
3169 /// target-specific constructs to SelectionDAG operators.
3170 class TargetLowering : public TargetLoweringBase {
3171 public:
3172  struct DAGCombinerInfo;
3173  struct MakeLibCallOptions;
3174 
3175  TargetLowering(const TargetLowering &) = delete;
3176  TargetLowering &operator=(const TargetLowering &) = delete;
3177 
3178  explicit TargetLowering(const TargetMachine &TM);
3179 
3180  bool isPositionIndependent() const;
3181 
3182  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3183  FunctionLoweringInfo *FLI,
3184  LegacyDivergenceAnalysis *DA) const {
3185  return false;
3186  }
3187 
3188  virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3189  return false;
3190  }
3191 
3192  /// Returns true (by value) and sets the base pointer, offset pointer, and
3193  /// addressing mode (by reference) if the node's address can be legally
3194  /// represented as a pre-indexed load / store address.
3195  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3196  SDValue &/*Offset*/,
3197  ISD::MemIndexedMode &/*AM*/,
3198  SelectionDAG &/*DAG*/) const {
3199  return false;
3200  }
3201 
3202  /// Returns true (by value) and sets the base pointer, offset pointer, and
3203  /// addressing mode (by reference) if this node can be combined with a load /
3204  /// store to form a post-indexed load / store.
3205  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3206  SDValue &/*Base*/,
3207  SDValue &/*Offset*/,
3208  ISD::MemIndexedMode &/*AM*/,
3209  SelectionDAG &/*DAG*/) const {
3210  return false;
3211  }
3212 
3213  /// Returns true if the specified base+offset is a legal indexed addressing
3214  /// mode for this target. \p MI is the load or store instruction that is being
3215  /// considered for transformation.
3216  virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3217  bool IsPre, MachineRegisterInfo &MRI) const {
3218  return false;
3219  }
3220 
3221  /// Return the entry encoding for a jump table in the current function. The
3222  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3223  virtual unsigned getJumpTableEncoding() const;
3224 
3225  virtual const MCExpr *
3226  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3227  const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3228  MCContext &/*Ctx*/) const {
3229  llvm_unreachable("Need to implement this hook if target has custom JTIs");
3230  }
3231 
3232  /// Returns relocation base for the given PIC jumptable.
3233  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3234  SelectionDAG &DAG) const;
3235 
3236  /// This returns the relocation base for the given PIC jumptable, the same as
3237  /// getPICJumpTableRelocBase, but as an MCExpr.
3238  virtual const MCExpr *
3239  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3240  unsigned JTI, MCContext &Ctx) const;
3241 
3242  /// Return true if folding a constant offset with the given GlobalAddress is
3243  /// legal. It is frequently not legal in PIC relocation models.
3244  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3245 
3246  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3247  SDValue &Chain) const;
3248 
3249  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3250  SDValue &NewRHS, ISD::CondCode &CCCode,
3251  const SDLoc &DL, const SDValue OldLHS,
3252  const SDValue OldRHS) const;
3253 
3254  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3255  SDValue &NewRHS, ISD::CondCode &CCCode,
3256  const SDLoc &DL, const SDValue OldLHS,
3257  const SDValue OldRHS, SDValue &Chain,
3258  bool IsSignaling = false) const;
3259 
3260  /// Returns a pair of (return value, chain).
3261  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3262  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3263  EVT RetVT, ArrayRef<SDValue> Ops,
3264  MakeLibCallOptions CallOptions,
3265  const SDLoc &dl,
3266  SDValue Chain = SDValue()) const;
3267 
3268  /// Check whether parameters to a call that are passed in callee saved
3269  /// registers are the same as from the calling function. This needs to be
3270  /// checked for tail call eligibility.
3271  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3272  const uint32_t *CallerPreservedMask,
3273  const SmallVectorImpl<CCValAssign> &ArgLocs,
3274  const SmallVectorImpl<SDValue> &OutVals) const;
3275 
3276  //===--------------------------------------------------------------------===//
3277  // TargetLowering Optimization Methods
3278  //
3279 
3280  /// A convenience struct that encapsulates a DAG, and two SDValues for
3281  /// returning information from TargetLowering to its clients that want to
3282  /// combine.
3283  struct TargetLoweringOpt {
3284  SelectionDAG &DAG;
3285  bool LegalTys;
3286  bool LegalOps;
3287  SDValue Old;
3288  SDValue New;
3289 
3290  explicit TargetLoweringOpt(SelectionDAG &InDAG,
3291  bool LT, bool LO) :
3292  DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3293 
3294  bool LegalTypes() const { return LegalTys; }
3295  bool LegalOperations() const { return LegalOps; }
3296 
3297  bool CombineTo(SDValue O, SDValue N) {
3298  Old = O;
3299  New = N;
3300  return true;
3301  }
3302  };
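 // Typical use inside a demanded-bits simplification (a sketch; Op and the
 // cheaper replacement node come from the surrounding combine):
 //
 //   SDValue Simplified = /* cheaper equivalent of Op */;
 //   if (Simplified)
 //     return TLO.CombineTo(Op, Simplified);  // records Old/New for the caller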
3303 
3304  /// Determines the optimal series of memory ops to replace the memset / memcpy.
3305  /// Return true if the number of memory ops is below the threshold (Limit).
3306  /// It returns the types of the sequence of memory ops to perform
3307  /// memset / memcpy by reference.
3308  bool findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3309  const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3310  const AttributeList &FuncAttributes) const;
3311 
3312  /// Check to see if the specified operand of the specified instruction is a
3313  /// constant integer. If so, check to see if there are any bits set in the
3314  /// constant that are not demanded. If so, shrink the constant and return
3315  /// true.
3316  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3317  const APInt &DemandedElts,
3318  TargetLoweringOpt &TLO) const;
3319 
3320  /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3321  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3322  TargetLoweringOpt &TLO) const;
3323 
3324  // Target hook to do target-specific const optimization, which is called by
3325  // ShrinkDemandedConstant. This function should return true if the target
3326  // doesn't want ShrinkDemandedConstant to further optimize the constant.
3327  virtual bool targetShrinkDemandedConstant(SDValue Op,
3328  const APInt &DemandedBits,
3329  const APInt &DemandedElts,
3330  TargetLoweringOpt &TLO) const {
3331  return false;
3332  }
3333 
3334  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
3335  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
3336  /// generalized for targets with other types of implicit widening casts.
3337  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
3338  TargetLoweringOpt &TLO) const;
3339 
3340  /// Look at Op. At this point, we know that only the DemandedBits bits of the
3341  /// result of Op are ever used downstream. If we can use this information to
3342  /// simplify Op, create a new simplified DAG node and return true, returning
3343  /// the original and new nodes in Old and New. Otherwise, analyze the
3344  /// expression and return a mask of KnownOne and KnownZero bits for the
3345  /// expression (used to simplify the caller). The KnownZero/One bits may only
3346  /// be accurate for those bits in the Demanded masks.
3347  /// \p AssumeSingleUse When this parameter is true, this function will
3348  /// attempt to simplify \p Op even if there are multiple uses.
3349  /// Callers are responsible for correctly updating the DAG based on the
3350  /// results of this function, because simply replacing TLO.Old
3351  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3352  /// has multiple uses.
3353  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3354  const APInt &DemandedElts, KnownBits &Known,
3355  TargetLoweringOpt &TLO, unsigned Depth = 0,
3356  bool AssumeSingleUse = false) const;
3357 
3358  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
3359  /// Adds Op back to the worklist upon success.
3360  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3361  KnownBits &Known, TargetLoweringOpt &TLO,
3362  unsigned Depth = 0,
3363  bool AssumeSingleUse = false) const;
3364 
3365  /// Helper wrapper around SimplifyDemandedBits.
3366  /// Adds Op back to the worklist upon success.
3367  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3368  DAGCombinerInfo &DCI) const;
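 // For example, a PerformDAGCombine implementation that only needs the low
 // 8 bits of an operand could try (a sketch):
 //
 //   APInt DemandedBits = APInt::getLowBitsSet(32, 8);
 //   if (SimplifyDemandedBits(N->getOperand(0), DemandedBits, DCI))
 //     return SDValue(N, 0);   // the DAG was updated in place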
3369 
3370  /// More limited version of SimplifyDemandedBits that can be used to "look
3371  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3372  /// bitwise ops etc.
3373  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3374  const APInt &DemandedElts,
3375  SelectionDAG &DAG,
3376  unsigned Depth) const;
3377 
3378  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3379  /// elements.
3380  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3381  SelectionDAG &DAG,
3382  unsigned Depth = 0) const;
3383 
3384  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3385  /// bits from only some vector elements.
3386  SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
3387  const APInt &DemandedElts,
3388  SelectionDAG &DAG,
3389  unsigned Depth = 0) const;
3390 
3391  /// Look at Vector Op. At this point, we know that only the DemandedElts
3392  /// elements of the result of Op are ever used downstream. If we can use
3393  /// this information to simplify Op, create a new simplified DAG node and
3394  /// return true, storing the original and new nodes in TLO.
3395  /// Otherwise, analyze the expression and return a mask of KnownUndef and
3396  /// KnownZero elements for the expression (used to simplify the caller).
3397  /// The KnownUndef/Zero elements may only be accurate for those bits
3398  /// in the DemandedMask.
3399  /// \p AssumeSingleUse When this parameter is true, this function will
3400  /// attempt to simplify \p Op even if there are multiple uses.
3401  /// Callers are responsible for correctly updating the DAG based on the
3402  /// results of this function, because simply replacing TLO.Old
3403  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3404  /// has multiple uses.
3405  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
3406  APInt &KnownUndef, APInt &KnownZero,
3407  TargetLoweringOpt &TLO, unsigned Depth = 0,
3408  bool AssumeSingleUse = false) const;
3409 
3410  /// Helper wrapper around SimplifyDemandedVectorElts.
3411  /// Adds Op back to the worklist upon success.
3412  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
3413  APInt &KnownUndef, APInt &KnownZero,
3414  DAGCombinerInfo &DCI) const;
3415 
3416  /// Determine which of the bits specified in Mask are known to be either zero
3417  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3418  /// argument allows us to only collect the known bits that are shared by the
3419  /// requested vector elements.
3420  virtual void computeKnownBitsForTargetNode(const SDValue Op,
3421  KnownBits &Known,
3422  const APInt &DemandedElts,
3423  const SelectionDAG &DAG,
3424  unsigned Depth = 0) const;
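 // A typical override pattern (a sketch; MyTargetISD::LOAD_U16 is a made-up
 // target node that produces a zero-extended 16-bit value):
 //
 //   void MyTargetLowering::computeKnownBitsForTargetNode(
 //       const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
 //       const SelectionDAG &DAG, unsigned Depth) const {
 //     if (Op.getOpcode() == MyTargetISD::LOAD_U16)
 //       Known.Zero.setBitsFrom(16);   // bits [16, BitWidth) known to be zero
 //   }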
3425 
3426  /// Determine which of the bits specified in Mask are known to be either zero
3427  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3428  /// argument allows us to only collect the known bits that are shared by the
3429  /// requested vector elements. This is for GISel.
3430  virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
3431  Register R, KnownBits &Known,
3432  const APInt &DemandedElts,
3433  const MachineRegisterInfo &MRI,
3434  unsigned Depth = 0) const;
3435 
3436  /// Determine the known alignment for the pointer value \p R. This can
3437  /// typically be inferred from the number of low known 0 bits. However, for a
3438  /// pointer with a non-integral address space, the alignment value may be
3439  /// independent from the known low bits.
3440  virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
3441  Register R,
3442  const MachineRegisterInfo &MRI,
3443  unsigned Depth = 0) const;
3444 
3445  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
3446  /// Default implementation computes low bits based on alignment
3447  /// information. This should preserve known bits passed into it.
3448  virtual void computeKnownBitsForFrameIndex(int FIOp,
3449  KnownBits &Known,
3450  const MachineFunction &MF) const;
3451 
3452  /// This method can be implemented by targets that want to expose additional
3453  /// information about sign bits to the DAG Combiner. The DemandedElts
3454  /// argument allows us to only collect the minimum sign bits that are shared
3455  /// by the requested vector elements.
3456  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
3457  const APInt &DemandedElts,
3458  const SelectionDAG &DAG,
3459  unsigned Depth = 0) const;
3460 
3461  /// This method can be implemented by targets that want to expose additional
3462  /// information about sign bits to GlobalISel combiners. The DemandedElts
3463  /// argument allows us to only collect the minimum sign bits that are shared
3464  /// by the requested vector elements.
3465  virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
3466  Register R,
3467  const APInt &DemandedElts,
3468  const MachineRegisterInfo &MRI,
3469  unsigned Depth = 0) const;
3470 
3471  /// Attempt to simplify any target nodes based on the demanded vector
3472  /// elements, returning true on success. Otherwise, analyze the expression and
3473  /// return a mask of KnownUndef and KnownZero elements for the expression
3474  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
3475  /// accurate for those bits in the DemandedMask.
3476  virtual bool SimplifyDemandedVectorEltsForTargetNode(
3477  SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
3478  APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
3479 
3480  /// Attempt to simplify any target nodes based on the demanded bits/elts,
3481  /// returning true on success. Otherwise, analyze the
3482  /// expression and return a mask of KnownOne and KnownZero bits for the
3483  /// expression (used to simplify the caller). The KnownZero/One bits may only
3484  /// be accurate for those bits in the Demanded masks.
3485  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
3486  const APInt &DemandedBits,
3487  const APInt &DemandedElts,
3488  KnownBits &Known,
3489  TargetLoweringOpt &TLO,
3490  unsigned Depth = 0) const;
3491 
3492  /// More limited version of SimplifyDemandedBits that can be used to "look
3493  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3494  /// bitwise ops etc.
3495  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
3496  SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3497  SelectionDAG &DAG, unsigned Depth) const;
3498 
3499  /// Return true if this function can prove that \p Op is never poison
3500  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
3501  /// argument limits the check to the requested vector elements.
3502  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
3503  SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
3504  bool PoisonOnly, unsigned Depth) const;
3505 
3506  /// Tries to build a legal vector shuffle using the provided parameters
3507  /// or equivalent variations. The Mask argument may be modified as the
3508  /// function tries different variations.
3509  /// Returns an empty SDValue if the operation fails.
3510  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
3511  SDValue N1, MutableArrayRef<int> Mask,
3512  SelectionDAG &DAG) const;
3513 
3514  /// This method returns the constant pool value that will be loaded by LD.
3515  /// NOTE: You must check for implicit extensions of the constant by LD.
3516  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
3517 
3518  /// If \p SNaN is false, \returns true if \p Op is known to never be any
3519  /// NaN. If \p SNaN is true, returns if \p Op is known to never be a signaling
3520  /// NaN.
3521  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
3522  const SelectionDAG &DAG,
3523  bool SNaN = false,
3524  unsigned Depth = 0) const;
3525  struct DAGCombinerInfo {
3526  void *DC; // The DAG Combiner object.
3527  CombineLevel Level;
3528  bool CalledByLegalizer;
3529 
3530  public:
3531  SelectionDAG &DAG;
3532 
3533  DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
3534  : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
3535 
3536  bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
3537  bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
3538  bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
3539  CombineLevel getDAGCombineLevel() { return Level; }
3540  bool isCalledByLegalizer() const { return CalledByLegalizer; }
3541 
3542  void AddToWorklist(SDNode *N);
3543  SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
3544  SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
3545  SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
3546 
3547  bool recursivelyDeleteUnusedNodes(SDNode *N);
3548 
3549  void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
3550  };
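 // Inside PerformDAGCombine, a replacement is typically committed through
 // DCI (a sketch; NewVal is whatever the combine computed):
 //
 //   SDValue NewVal = DAG.getNode(ISD::AND, DL, VT, X, MaskC);
 //   return DCI.CombineTo(N, NewVal);   // replaces all uses of N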
3551 
3552  /// Return true if N is a constant or constant vector equal to the true value
3553  /// from getBooleanContents().
3554  bool isConstTrueVal(const SDNode *N) const;
3555 
3556  /// Return true if N is a constant or constant vector equal to the false value
3557  /// from getBooleanContents().
3558  bool isConstFalseVal(const SDNode *N) const;
3559 
3560  /// Return true if \p N is a true value when extended to \p VT.
3561  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
3562 
3563  /// Try to simplify a setcc built with the specified operands and cc. If it is
3564  /// unable to simplify it, return a null SDValue.
3565  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
3566  bool foldBooleans, DAGCombinerInfo &DCI,
3567  const SDLoc &dl) const;
3568 
3569  // For targets which wrap addresses, unwrap them for analysis.
3570  virtual SDValue unwrapAddress(SDValue N) const { return N; }
3571 
3572  /// Returns true (and the GlobalValue and the offset) if the node is a
3573  /// GlobalAddress + offset.
3574  virtual bool
3575  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
3576 
3577  /// This method will be invoked for all target nodes and for any
3578  /// target-independent nodes that the target has registered to invoke it
3579  /// for.
3580  ///
3581  /// The semantics are as follows:
3582  /// Return Value:
3583  /// SDValue.Val == 0 - No change was made
3584  /// SDValue.Val == N - N was replaced, is dead, and is already handled.
3585  /// otherwise - N should be replaced by the returned Operand.
3586  ///
3587  /// In addition, methods provided by DAGCombinerInfo may be used to perform
3588  /// more complex transformations.
3589  ///
3590  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
3591 
3592  /// Return true if it is profitable to move this shift by a constant amount
3593  /// through its operand, adjusting any immediate operands as necessary to
3594  /// preserve semantics. This transformation may not be desirable if it
3595  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
3596  /// extraction in AArch64). By default, it returns true.
3597  ///
3598  /// @param N the shift node
3599  /// @param Level the current DAGCombine legalization level.
3600  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
3601  CombineLevel Level) const {
3602  return true;
3603  }
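 // The combine this hook guards rewrites, for example:
 //
 //   (shl (add x, c1), c2)  -->  (add (shl x, c2), c1 << c2)
 //
 // which is usually a win but can break bitfield-extraction patterns.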
3604 
3605  /// Return true if the target has native support for the specified value type
3606  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
3607  /// i16 is legal, but undesirable since i16 instruction encodings are longer
3608  /// and some i16 instructions are slow.
3609  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
3610  // By default, assume all legal types are desirable.
3611  return isTypeLegal(VT);
3612  }
3613 
3614  /// Return true if it is profitable for dag combiner to transform a floating
3615  /// point op of specified opcode to an equivalent op of an integer
3616  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
3617  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
3618  EVT /*VT*/) const {
3619  return false;
3620  }
3621 
3622  /// This method queries the target whether it is beneficial for dag combiner to
3623  /// promote the specified node. If true, it should return the desired
3624  /// promotion type by reference.
3625  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
3626  return false;
3627  }
3628 
3629  /// Return true if the target supports the swifterror attribute. It optimizes
3630  /// loads and stores to read and write a specific register.
3631  virtual bool supportSwiftError() const {
3632  return false;
3633  }
3634 
3635  /// Return true if the target supports handling a subset of CSRs for the
3636  /// given machine function explicitly via copies.
3637  virtual bool supportSplitCSR(MachineFunction *MF) const {
3638  return false;
3639  }
3640 
3641  /// Perform necessary initialization to handle a subset of CSRs explicitly
3642  /// via copies. This function is called at the beginning of instruction
3643  /// selection.
3644  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
3645  llvm_unreachable("Not Implemented");
3646  }
3647 
3648  /// Insert explicit copies in entry and exit blocks. We copy a subset of
3649  /// CSRs to virtual registers in the entry block, and copy them back to
3650  /// physical registers in the exit blocks. This function is called at the end
3651  /// of instruction selection.
3652  virtual void insertCopiesSplitCSR(
3653  MachineBasicBlock *Entry,
3654  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
3655  llvm_unreachable("Not Implemented");
3656  }
3657 
3658  /// Return the newly negated expression if the cost is not expensive and
3659  /// set the cost in \p Cost to indicate whether it is cheaper or neutral to
3660  /// do the negation.
3661  virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
3662  bool LegalOps, bool OptForSize,
3663  NegatibleCost &Cost,
3664  unsigned Depth = 0) const;
3665 
3666  /// This is the helper function to return the newly negated expression only
3667  /// when the cost is cheaper.
3668  SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
3669  bool LegalOps, bool OptForSize,
3670  unsigned Depth = 0) const {
3671  NegatibleCost Cost = NegatibleCost::Expensive;
3672  SDValue Neg =
3673  getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3674  if (Neg && Cost == NegatibleCost::Cheaper)
3675  return Neg;
3676  // Remove the newly created node to avoid the side effect on the DAG.
3677  if (Neg && Neg.getNode()->use_empty())
3678  DAG.RemoveDeadNode(Neg.getNode());
3679  return SDValue();
3680  }
3681 
3682  /// This is the helper function to return the newly negated expression if
3683  /// the cost is not expensive.
3684  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
3685  bool OptForSize, unsigned Depth = 0) const {
3686  NegatibleCost Cost = NegatibleCost::Expensive;
3687  return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3688  }
3689 
3690  //===--------------------------------------------------------------------===//
3691  // Lowering methods - These methods must be implemented by targets so that
3692  // the SelectionDAGBuilder code knows how to lower these.
3693  //
3694 
3695  /// Target-specific splitting of values into parts that fit a register
3696  /// storing a legal type.
3697  virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
3698  SDValue Val, SDValue *Parts,
3699  unsigned NumParts, MVT PartVT,
3700  Optional<CallingConv::ID> CC) const {
3701  return false;
3702  }
3703 
3704  /// Target-specific combining of register parts into its original value
3705  virtual SDValue
3706  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
3707  const SDValue *Parts, unsigned NumParts,
3708  MVT PartVT, EVT ValueVT,
3709  Optional<CallingConv::ID> CC) const {
3710  return SDValue();
3711  }
3712 
3713  /// This hook must be implemented to lower the incoming (formal) arguments,
3714  /// described by the Ins array, into the specified DAG. The implementation
3715  /// should fill in the InVals array with legal-type argument values, and
3716  /// return the resulting token chain value.
3717  virtual SDValue LowerFormalArguments(
3718  SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
3719  const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
3720  SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
3721  llvm_unreachable("Not Implemented");
3722  }
3723 
3724  /// This structure contains all information that is necessary for lowering
3725  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
3726  /// needs to lower a call, and targets will see this struct in their LowerCall
3727  /// implementation.
3728  struct CallLoweringInfo {
3729  SDValue Chain;
3730  Type *RetTy = nullptr;
3731  bool RetSExt : 1;
3732  bool RetZExt : 1;
3733  bool IsVarArg : 1;
3734  bool IsInReg : 1;
3735  bool DoesNotReturn : 1;
3736  bool IsReturnValueUsed : 1;
3737  bool IsConvergent : 1;
3738  bool IsPatchPoint : 1;
3739  bool IsPreallocated : 1;
3740  bool NoMerge : 1;
3741 
3742  // IsTailCall should be modified by implementations of
3743  // TargetLowering::LowerCall that perform tail call conversions.
3744  bool IsTailCall = false;
3745 
3746  // Is Call lowering done post SelectionDAG type legalization.
3747  bool IsPostTypeLegalization = false;
3748 
3749  unsigned NumFixedArgs = -1;
3750  CallingConv::ID CallConv = CallingConv::C;
3751  SDValue Callee;
3752  ArgListTy Args;
3753  SelectionDAG &DAG;
3754  SDLoc DL;
3755  const CallBase *CB = nullptr;
3756  SmallVector<ISD::OutputArg, 32> Outs;
3757  SmallVector<SDValue, 32> OutVals;
3758  SmallVector<ISD::InputArg, 32> Ins;
3759  SmallVector<SDValue, 4> InVals;
3760 
3761  CallLoweringInfo(SelectionDAG &DAG)
3762  : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
3763  DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
3764  IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
3765  DAG(DAG) {}
3766 
3767  CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
3768  DL = dl;
3769  return *this;
3770  }
3771 
3772  CallLoweringInfo &setChain(SDValue InChain) {
3773  Chain = InChain;
3774  return *this;
3775  }
3776 
3777  // setCallee with target/module-specific attributes
3778  CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
3779  SDValue Target, ArgListTy &&ArgsList) {
3780  RetTy = ResultType;
3781  Callee = Target;
3782  CallConv = CC;
3783  NumFixedArgs = ArgsList.size();
3784  Args = std::move(ArgsList);
3785 
3786  DAG.getTargetLoweringInfo().markLibCallAttributes(
3787  &(DAG.getMachineFunction()), CC, Args);
3788  return *this;
3789  }
3790 
3791  CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
3792  SDValue Target, ArgListTy &&ArgsList) {
3793  RetTy = ResultType;
3794  Callee = Target;
3795  CallConv = CC;
3796  NumFixedArgs = ArgsList.size();
3797  Args = std::move(ArgsList);
3798  return *this;
3799  }
3800 
3801  CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
3802  SDValue Target, ArgListTy &&ArgsList,
3803  const CallBase &Call) {
3804  RetTy = ResultType;
3805 
3806  IsInReg = Call.hasRetAttr(Attribute::InReg);
3807  DoesNotReturn =
3808  Call.doesNotReturn() ||
3809  (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
3810  IsVarArg = FTy->isVarArg();
3811  IsReturnValueUsed = !Call.use_empty();
3812  RetSExt = Call.hasRetAttr(Attribute::SExt);
3813  RetZExt = Call.hasRetAttr(Attribute::ZExt);
3814  NoMerge = Call.hasFnAttr(Attribute::NoMerge);
3815 
3816  Callee = Target;
3817 
3818  CallConv = Call.getCallingConv();
3819  NumFixedArgs = FTy->getNumParams();
3820  Args = std::move(ArgsList);
3821 
3822  CB = &Call;
3823 
3824  return *this;
3825  }
3826 
3827  CallLoweringInfo &setInRegister(bool Value = true) {
3828  IsInReg = Value;
3829  return *this;
3830  }
3831 
3832  CallLoweringInfo &setNoReturn(bool Value = true) {
3833  DoesNotReturn = Value;
3834  return *this;
3835  }
3836 
3837  CallLoweringInfo &setVarArg(bool Value = true) {
3838  IsVarArg = Value;
3839  return *this;
3840  }
3841 
3842  CallLoweringInfo &setTailCall(bool Value = true) {
3843  IsTailCall = Value;
3844  return *this;
3845  }
3846 
3847  CallLoweringInfo &setDiscardResult(bool Value = true) {
3848  IsReturnValueUsed = !Value;
3849  return *this;
3850  }
3851 
3852  CallLoweringInfo &setConvergent(bool Value = true) {
3853  IsConvergent = Value;
3854  return *this;
3855  }
3856 
3857  CallLoweringInfo &setSExtResult(bool Value = true) {
3858  RetSExt = Value;
3859  return *this;
3860  }
3861 
3862  CallLoweringInfo &setZExtResult(bool Value = true) {
3863  RetZExt = Value;
3864  return *this;
3865  }
3866 
3867  CallLoweringInfo &setIsPatchPoint(bool Value = true) {
3868  IsPatchPoint = Value;
3869  return *this;
3870  }
3871 
3872  CallLoweringInfo &setIsPreallocated(bool Value = true) {
3873  IsPreallocated = Value;
3874  return *this;
3875  }
3876 
3877  CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
3878  IsPostTypeLegalization = Value;
3879  return *this;
3880  }
3881 
3882  ArgListTy &getArgs() {
3883  return Args;
3884  }
3885  };
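 // The usual pattern when emitting a runtime call during lowering (a sketch;
 // Callee, RetTy and Args construction is elided):
 //
 //   TargetLowering::CallLoweringInfo CLI(DAG);
 //   CLI.setDebugLoc(dl)
 //      .setChain(Chain)
 //      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
 //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
 //   // CallResult.first is the return value, CallResult.second the chain.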
3886 
3887  /// This structure is used to pass arguments to the makeLibCall function.
3888  struct MakeLibCallOptions {
3889  // By passing type list before soften to makeLibCall, the target hook
3890  // shouldExtendTypeInLibCall can get the original type before soften.
3891  ArrayRef<EVT> OpsVTBeforeSoften;
3892  EVT RetVTBeforeSoften;
3893  bool IsSExt : 1;
3894  bool DoesNotReturn : 1;
3895  bool IsReturnValueUsed : 1;
3896  bool IsPostTypeLegalization : 1;
3897  bool IsSoften : 1;
3898 
3899  MakeLibCallOptions()
3900  : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
3901  IsPostTypeLegalization(false), IsSoften(false) {}
3902 
3903  MakeLibCallOptions &setSExt(bool Value = true) {
3904  IsSExt = Value;
3905  return *this;
3906  }
3907 
3908  MakeLibCallOptions &setNoReturn(bool Value = true) {
3909  DoesNotReturn = Value;
3910  return *this;
3911  }
3912 
3913  MakeLibCallOptions &setDiscardResult(bool Value = true) {
3914  IsReturnValueUsed = !Value;
3915  return *this;
3916  }
3917 
3918  MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
3919  IsPostTypeLegalization = Value;
3920  return *this;
3921  }
3922 
3923  MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
3924  bool Value = true) {
3925  OpsVTBeforeSoften = OpsVT;
3926  RetVTBeforeSoften = RetVT;
3927  IsSoften = Value;
3928  return *this;
3929  }
3930  };
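 // Typical use with makeLibCall (a sketch; LC, RetVT, Ops and dl come from
 // the surrounding lowering code):
 //
 //   TargetLowering::MakeLibCallOptions CallOptions;
 //   CallOptions.setSExt(/*signed operands*/ true);
 //   std::pair<SDValue, SDValue> Res =
 //       makeLibCall(DAG, LC, RetVT, Ops, CallOptions, dl, Chain);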
3931 
3932  /// This function lowers an abstract call to a function into an actual call.
3933  /// This returns a pair of operands. The first element is the return value
3934  /// for the function (if RetTy is not VoidTy). The second element is the
3935  /// outgoing token chain. It calls LowerCall to do the actual lowering.
3936  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
3937 
3938  /// This hook must be implemented to lower calls into the specified
3939  /// DAG. The outgoing arguments to the call are described by the Outs array,
3940  /// and the values to be returned by the call are described by the Ins
3941  /// array. The implementation should fill in the InVals array with legal-type
3942  /// return values from the call, and return the resulting token chain value.
3943  virtual SDValue
3944  LowerCall(CallLoweringInfo &/*CLI*/,
3945  SmallVectorImpl<SDValue> &/*InVals*/) const {
3946  llvm_unreachable("Not Implemented");
3947  }
3948 
3949  /// Target-specific cleanup for formal ByVal parameters.
3950  virtual void HandleByVal(CCState *, unsigned &, Align) const {}
3951 
3952  /// This hook should be implemented to check whether the return values
3953  /// described by the Outs array can fit into the return registers. If false
3954  /// is returned, an sret-demotion is performed.
3955  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
3956  MachineFunction &/*MF*/, bool /*isVarArg*/,
3957  const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
3958  LLVMContext &/*Context*/) const
3959  {
3960  // Return true by default to get preexisting behavior.
3961  return true;
3962  }
3963 
3964  /// This hook must be implemented to lower outgoing return values, described
3965  /// by the Outs array, into the specified DAG. The implementation should
3966  /// return the resulting token chain value.
3967  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
3968  bool /*isVarArg*/,
3969  const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
3970  const SmallVectorImpl<SDValue> & /*OutVals*/,
3971  const SDLoc & /*dl*/,
3972  SelectionDAG & /*DAG*/) const {
3973  llvm_unreachable("Not Implemented");
3974  }
3975 
3976  /// Return true if result of the specified node is used by a return node
3977  /// only. It also computes and returns the input chain for the tail call.
3978  ///
3979  /// This is used to determine whether it is possible to codegen a libcall as
3980  /// tail call at legalization time.
3981  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
3982  return false;
3983  }
3984 
3985  /// Return true if the target may be able to emit the call instruction as a tail
3986  /// call. This is used by optimization passes to determine if it's profitable
3987  /// to duplicate return instructions to enable tailcall optimization.
3988  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
3989  return false;
3990  }
3991 
3992  /// Return the builtin name for the __builtin___clear_cache intrinsic.
3993  /// Default is to invoke the clear cache library call.
3994  virtual const char * getClearCacheBuiltinName() const {
3995  return "__clear_cache";
3996  }
3997 
3998  /// Return the register ID of the name passed in. Used by named register
3999  /// global variables extension. There is no target-independent behaviour
4000  /// so the default action is to bail.
4001  virtual Register getRegisterByName(const char* RegName, LLT Ty,
4002  const MachineFunction &MF) const {
4003  report_fatal_error("Named registers not implemented for this target");
4004  }
4005 
4006  /// Return the type that should be used to zero or sign extend a
4007  /// zeroext/signext integer return value. FIXME: Some C calling conventions
4008  /// require the return type to be promoted, but this is not true all the time,
4009  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
4010  /// conventions. The frontend should handle this and include all of the
4011  /// necessary information.
4012  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
4013  ISD::NodeType /*ExtendKind*/) const {
4014  EVT MinVT = getRegisterType(Context, MVT::i32);
4015  return VT.bitsLT(MinVT) ? MinVT : VT;
4016  }
4017 
4018  /// For some targets, an LLVM struct type must be broken down into multiple
4019  /// simple types, but the calling convention specifies that the entire struct
4020  /// must be passed in a block of consecutive registers.
4021  virtual bool
4022  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
4023  bool isVarArg,
4024  const DataLayout &DL) const {
4025  return false;
4026  }
4027 
4028  /// For most targets, an LLVM type must be broken down into multiple
4029  /// smaller types. Usually the halves are ordered according to the endianness
4030  /// but for some platform that would break. So this method will default to
4031  /// matching the endianness but can be overridden.
4032  virtual bool
4033  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
4034  return DL.isLittleEndian();
4035  }
4036 
4037  /// Returns a 0 terminated array of registers that can be safely used as
4038  /// scratch registers.
4039  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
4040  return nullptr;
4041  }
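 // An override returns a 0-terminated array with static storage duration,
 // e.g. (a sketch; MyTarget::R12 is a placeholder physical register):
 //
 //   const MCPhysReg *
 //   MyTargetLowering::getScratchRegisters(CallingConv::ID) const {
 //     static const MCPhysReg ScratchRegs[] = { MyTarget::R12, 0 };
 //     return ScratchRegs;
 //   }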
4042 
4043  /// This callback is used to prepare for a volatile or atomic load.
4044  /// It takes a chain node as input and returns the chain for the load itself.
4045  ///
4046  /// Having a callback like this is necessary for targets like SystemZ,
4047  /// which allows a CPU to reuse the result of a previous load indefinitely,
4048  /// even if a cache-coherent store is performed by another CPU. The default
4049  /// implementation does nothing.
4050  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
4051  SelectionDAG &DAG) const {
4052  return Chain;
4053  }
4054 
4055  /// Should SelectionDAG lower an atomic store of the given kind as a normal
4056  /// StoreSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to
4057  /// eventually migrate all targets to using StoreSDNodes, but porting is
4058  /// being done target at a time.
4059  virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
4060  assert(SI.isAtomic() && "violated precondition");
4061  return false;
4062  }
4063 
4064  /// Should SelectionDAG lower an atomic load of the given kind as a normal
4065  /// LoadSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to
4066  /// eventually migrate all targets to using LoadSDNodes, but porting is
4067  /// being done target at a time.
4068  virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
4069  assert(LI.isAtomic() && "violated precondition");
4070  return false;
4071  }
4072 
4073 
4074  /// This callback is invoked by the type legalizer to legalize nodes with an
4075  /// illegal operand type but legal result types. It replaces the
4076  /// LowerOperation callback in the type Legalizer. The reason we can not do
4077  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
4078  /// use this callback.
4079  ///
4080  /// TODO: Consider merging with ReplaceNodeResults.
4081  ///
4082  /// The target places new result values for the node in Results (their number
4083  /// and types must exactly match those of the original return values of
4084  /// the node), or leaves Results empty, which indicates that the node is not
4085  /// to be custom lowered after all.
4086  /// The default implementation calls LowerOperation.
4087  virtual void LowerOperationWrapper(SDNode *N,
4088  SmallVectorImpl<SDValue> &Results,
4089  SelectionDAG &DAG) const;
4090 
4091  /// This callback is invoked for operations that are unsupported by the
4092  /// target, which are registered to use 'custom' lowering, and whose defined
4093  /// values are all legal. If the target has no operations that require custom
4094  /// lowering, it need not implement this. The default implementation of this
4095  /// aborts.
4096  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
4097 
4098  /// This callback is invoked when a node result type is illegal for the
4099  /// target, and the operation was registered to use 'custom' lowering for that
4100  /// result type. The target places new result values for the node in Results
4101  /// (their number and types must exactly match those of the original return
4102  /// values of the node), or leaves Results empty, which indicates that the
4103  /// node is not to be custom lowered after all.
4104  ///
4105  /// If the target has no operations that require custom lowering, it need not
4106  /// implement this. The default implementation aborts.
4107  virtual void ReplaceNodeResults(SDNode * /*N*/,
4108  SmallVectorImpl<SDValue> &/*Results*/,
4109  SelectionDAG &/*DAG*/) const {
4110  llvm_unreachable("ReplaceNodeResults not implemented for this target!");
4111  }
4112 
4113  /// This method returns the name of a target specific DAG node.
4114  virtual const char *getTargetNodeName(unsigned Opcode) const;
4115 
4116  /// This method returns a target specific FastISel object, or null if the
4117  /// target does not support "fast" ISel.
4118  virtual FastISel *createFastISel(FunctionLoweringInfo &,
4119  const TargetLibraryInfo *) const {
4120  return nullptr;
4121  }
4122 
4123  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
4124  SelectionDAG &DAG) const;
4125 
4126  //===--------------------------------------------------------------------===//
4127  // Inline Asm Support hooks
4128  //
4129 
4130  /// This hook allows the target to expand an inline asm call to be explicit
4131  /// llvm code if it wants to. This is useful for turning simple inline asms
4132  /// into LLVM intrinsics, which gives the compiler more information about the
4133  /// behavior of the code.
4134  virtual bool ExpandInlineAsm(CallInst *) const {
4135  return false;
4136  }
4137 
4138  enum ConstraintType {
4139  C_Register, // Constraint represents specific register(s).
4140  C_RegisterClass, // Constraint represents any of register(s) in class.
4141  C_Memory, // Memory constraint.
4142  C_Immediate, // Requires an immediate.
4143  C_Other, // Something else.
4144  C_Unknown // Unsupported constraint.
4145  };
4146 
4147  enum ConstraintWeight {
4148  // Generic weights.
4149  CW_Invalid = -1, // No match.
4150  CW_Okay = 0, // Acceptable.
4151  CW_Good = 1, // Good weight.
4152  CW_Better = 2, // Better weight.
4153  CW_Best = 3, // Best weight.
4154 
4155  // Well-known weights.
4156  CW_SpecificReg = CW_Okay, // Specific register operands.
4157  CW_Register = CW_Good, // Register operands.
4158  CW_Memory = CW_Better, // Memory operands.
4159  CW_Constant = CW_Best, // Constant operand.
4160  CW_Default = CW_Okay // Default or don't know type.
4161  };
4162 
4163  /// This contains information for each constraint that we are lowering.
4165  /// This contains the actual string for the code, like "m". TargetLowering
4166  /// picks the 'best' code from ConstraintInfo::Codes that most closely
4167  /// matches the operand.
4168  std::string ConstraintCode;
4169 
4170  /// Information about the constraint code, e.g. Register, RegisterClass,
4171  /// Memory, Other, Unknown.
4172  TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
4173 
4174  /// If this is the result output operand or a clobber, this is null,
4175  /// otherwise it is the incoming operand to the CallInst. This gets
4176  /// modified as the asm is processed.
4177  Value *CallOperandVal = nullptr;
4178 
4179  /// The ValueType for the operand value.
4180  MVT ConstraintVT = MVT::Other;
4181 
4182  /// Copy constructor for copying from a ConstraintInfo.
4183  AsmOperandInfo(InlineAsm::ConstraintInfo Info)
4184  : InlineAsm::ConstraintInfo(std::move(Info)) {}
4185 
4186  /// Return true if this is an input operand that is a matching constraint
4187  /// like "4".
4188  bool isMatchingInputConstraint() const;
4189 
4190  /// If this is an input matching constraint, this method returns the output
4191  /// operand it matches.
4192  unsigned getMatchedOperand() const;
4193  };
4194 
4195  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
4196 
4197  /// Split up the constraint string from the inline assembly value into the
4198  /// specific constraints and their prefixes, and also tie in the associated
4199  /// operand values. If this returns an empty vector, and if the constraint
4200  /// string itself isn't empty, there was an error parsing.
4201  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
4202  const TargetRegisterInfo *TRI,
4203  const CallBase &Call) const;
4204 
4205  /// Examine constraint type and operand type and determine a weight value.
4206  /// The operand object must already have been set up with the operand type.
4207  virtual ConstraintWeight getMultipleConstraintMatchWeight(
4208  AsmOperandInfo &info, int maIndex) const;
4209 
4210  /// Examine constraint string and operand type and determine a weight value.
4211  /// The operand object must already have been set up with the operand type.
4212  virtual ConstraintWeight getSingleConstraintMatchWeight(
4213  AsmOperandInfo &info, const char *constraint) const;
4214 
4215  /// Determines the constraint code and constraint type to use for the specific
4216  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4217  /// If the actual operand being passed in is available, it can be passed in as
4218  /// Op, otherwise an empty SDValue can be passed.
4219  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4220  SDValue Op,
4221  SelectionDAG *DAG = nullptr) const;
4222 
4223  /// Given a constraint, return the type of constraint it is for this target.
4224  virtual ConstraintType getConstraintType(StringRef Constraint) const;
4225 
4226  /// Given a physical register constraint (e.g. {edx}), return the register
4227  /// number and the register class for the register.
4228  ///
4229  /// Given a register class constraint, like 'r', if this corresponds directly
4230  /// to an LLVM register class, return a register of 0 and the register class
4231  /// pointer.
4232  ///
4233  /// This should only be used for C_Register constraints. On error, this
4234  /// returns a register number of 0 and a null register class pointer.
4235  virtual std::pair<unsigned, const TargetRegisterClass *>
4236  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4237  StringRef Constraint, MVT VT) const;
4238 
4239  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
4240  if (ConstraintCode == "m")
4241  return InlineAsm::Constraint_m;
4242  if (ConstraintCode == "o")
4243  return InlineAsm::Constraint_o;
4244  if (ConstraintCode == "X")
4245  return InlineAsm::Constraint_X;
4246  return InlineAsm::Constraint_Unknown;
4247  }
4248 
4249  /// Try to replace an X constraint, which matches anything, with another that
4250  /// has more specific requirements based on the type of the corresponding
4251  /// operand. This returns null if there is no replacement to make.
4252  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
4253 
4254  /// Lower the specified operand into the Ops vector. If it is invalid, don't
4255  /// add anything to Ops.
4256  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
4257  std::vector<SDValue> &Ops,
4258  SelectionDAG &DAG) const;
4259 
4260  /// Lower custom output constraints. If invalid, return SDValue().
4261  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
4262  const SDLoc &DL,
4263  const AsmOperandInfo &OpInfo,
4264  SelectionDAG &DAG) const;
4265 
4266  //===--------------------------------------------------------------------===//
4267  // Div utility functions
4268  //
4269  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4270  SmallVectorImpl<SDNode *> &Created) const;
4271  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4272  SmallVectorImpl<SDNode *> &Created) const;
4273 
4274  /// Targets may override this function to provide custom SDIV lowering for
4275  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
4276  /// assumes SDIV is expensive and replaces it with a series of other integer
4277  /// operations.
4278  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4279  SelectionDAG &DAG,
4280  SmallVectorImpl<SDNode *> &Created) const;
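
// [Editor's sketch, not part of TargetLowering.h] One plausible override,
// assuming a hypothetical Subtarget.hasFastSDiv() query: if hardware divide
// is cheap, return the node itself so no expansion happens; otherwise return
// an empty SDValue to get the generic shift-based lowering.
SDValue MyTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                        SelectionDAG &DAG,
                                        SmallVectorImpl<SDNode *> &Created) const {
  if (Subtarget.hasFastSDiv())
    return SDValue(N, 0); // keep the SDIV node as-is
  return SDValue();       // expand via shifts and adds
}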
4281 
4282  /// Indicate whether this target prefers to combine FDIVs with the same
4283  /// divisor. If the transform should never be done, return zero. If the
4284  /// transform should be done, return the minimum number of divisor uses
4285  /// that must exist.
4286  virtual unsigned combineRepeatedFPDivisors() const {
4287  return 0;
4288  }
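
// [Editor's sketch, not part of TargetLowering.h] A target where division is
// much slower than multiplication might require two repeated divisors before
// rewriting 'x/d; y/d' as 'r = 1.0/d; x*r; y*r':
unsigned MyTargetLowering::combineRepeatedFPDivisors() const {
  return 2; // one reciprocal plus N multiplies must beat N divisions
}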
4289 
4290  /// Hooks for building estimates in place of slower divisions and square
4291  /// roots.
4292 
4293  /// Return either a square root or its reciprocal estimate value for the input
4294  /// operand.
4295  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4296  /// 'Enabled' as set by a potential default override attribute.
4297  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4298  /// refinement iterations required to generate a sufficient (though not
4299  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4300  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
4301  /// algorithm implementation that uses either one or two constants.
4302  /// The boolean Reciprocal is used to select whether the estimate is for the
4303  /// square root of the input operand or the reciprocal of its square root.
4304  /// A target may choose to implement its own refinement within this function.
4305  /// If that's true, then return '0' as the number of RefinementSteps to avoid
4306  /// any further refinement of the estimate.
4307  /// An empty SDValue return means no estimate sequence can be created.
4308  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
4309  int Enabled, int &RefinementSteps,
4310  bool &UseOneConstNR, bool Reciprocal) const {
4311  return SDValue();
4312  }
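
// [Editor's sketch, not part of TargetLowering.h] Shape of a typical override:
// emit a hardware reciprocal-square-root estimate and request two
// Newton-Raphson refinement steps. MyISD::FRSQRTE and the Subtarget query are
// hypothetical stand-ins for a real target's estimate instruction.
SDValue MyTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                          int Enabled, int &RefinementSteps,
                                          bool &UseOneConstNR,
                                          bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if (VT == MVT::f32 && Subtarget.hasEstimateInstructions()) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 2; // enough for ~f32 accuracy from a coarse estimate
    UseOneConstNR = true;
    return DAG.getNode(MyISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue(); // no estimate sequence for other types
}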
4313 
4314  /// Return a reciprocal estimate value for the input operand.
4315  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4316  /// 'Enabled' as set by a potential default override attribute.
4317  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4318  /// refinement iterations required to generate a sufficient (though not
4319  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4320  /// A target may choose to implement its own refinement within this function.
4321  /// If that's true, then return '0' as the number of RefinementSteps to avoid
4322  /// any further refinement of the estimate.
4323  /// An empty SDValue return means no estimate sequence can be created.
4324  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
4325  int Enabled, int &RefinementSteps) const {
4326  return SDValue();
4327  }
4328 
4329  /// Return a target-dependent comparison result if the input operand is
4330  /// suitable for use with a square root estimate calculation. For example, the
4331  /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
4332  /// result should be used as the condition operand for a select or branch.
4333  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
4334  const DenormalMode &Mode) const;
4335 
4336  /// Return a target-dependent result if the input operand is not suitable for
4337  /// use with a square root estimate calculation.
4338  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
4339  SelectionDAG &DAG) const {
4340  return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
4341  }
4342 
4343  //===--------------------------------------------------------------------===//
4344  // Legalization utility functions
4345  //
4346 
4347  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
4348  /// respectively, each computing an n/2-bit part of the result.
4349  /// \param Result A vector that will be filled with the parts of the result
4350  /// in little-endian order.
4351  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
4352  /// if you want to control how low bits are extracted from the LHS.
4353  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
4354  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
4355  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
4356  /// \returns true if the node has been expanded, false if it has not
4357  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
4358  SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
4359  SelectionDAG &DAG, MulExpansionKind Kind,
4360  SDValue LL = SDValue(), SDValue LH = SDValue(),
4361  SDValue RL = SDValue(), SDValue RH = SDValue()) const;
4362 
4363  /// Expand a MUL into two nodes. One that computes the high bits of
4364  /// the result and one that computes the low bits.
4365  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
4366  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
4367  /// if you want to control how low bits are extracted from the LHS.
4368  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
4369  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
4370  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
4371  /// \returns true if the node has been expanded, false if it has not.
4372  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
4373  SelectionDAG &DAG, MulExpansionKind Kind,
4374  SDValue LL = SDValue(), SDValue LH = SDValue(),
4375  SDValue RL = SDValue(), SDValue RH = SDValue()) const;
4376 
4377  /// Expand funnel shift.
4378  /// \param N Node to expand
4379  /// \param Result output after conversion
4380  /// \returns True, if the expansion was successful, false otherwise
4381  bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
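
// [Editor's sketch, not part of TargetLowering.h] The bool-returning expand*
// helpers in this section all follow the same usage pattern during
// legalization: attempt the expansion and consume Result only on success.
// tryExpandFunnelShift is a hypothetical wrapper.
static bool tryExpandFunnelShift(const TargetLowering &TLI, SDNode *Node,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDValue> &Results) {
  SDValue Result;
  if (!TLI.expandFunnelShift(Node, Result, DAG))
    return false; // the target must handle FSHL/FSHR some other way
  Results.push_back(Result); // shift/or based replacement
  return true;
}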
4382 
4383  /// Expand rotations.
4384  /// \param N Node to expand
4385  /// \param AllowVectorOps expand vector rotate; this should only be performed
4386  /// if the legalization is happening outside of LegalizeVectorOps
4387  /// \param Result output after conversion
4388  /// \returns True, if the expansion was successful, false otherwise
4389  bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result,
4390  SelectionDAG &DAG) const;
4391 
4392  /// Expand shift-by-parts.
4393  /// \param N Node to expand
4394  /// \param Lo lower-output-part after conversion
4395  /// \param Hi upper-output-part after conversion
4396  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
4397  SelectionDAG &DAG) const;
4398 
4399  /// Expand float to SINT conversion (e.g. f32 to i64)
4400  /// \param N Node to expand
4401  /// \param Result output after conversion
4402  /// \returns True, if the expansion was successful, false otherwise
4403  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4404 
4405  /// Expand float to UINT conversion
4406  /// \param N Node to expand
4407  /// \param Result output after conversion
4408  /// \param Chain output chain after conversion
4409  /// \returns True, if the expansion was successful, false otherwise
4410  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
4411  SelectionDAG &DAG) const;
4412 
4413  /// Expand UINT to FP conversion (e.g. i64 to f64)
4414  /// \param N Node to expand
4415  /// \param Result output after conversion
4416  /// \param Chain output chain after conversion
4417  /// \returns True, if the expansion was successful, false otherwise
4418  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
4419  SelectionDAG &DAG) const;
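
// [Editor's sketch, not part of TargetLowering.h] The conversions that also
// produce a Chain follow the same try-and-consume pattern, but the chain
// result matters for the strict, exception-aware opcodes. tryExpandUIToFP is
// a hypothetical wrapper.
static bool tryExpandUIToFP(const TargetLowering &TLI, SDNode *Node,
                            SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &Results) {
  SDValue Result, Chain;
  if (!TLI.expandUINT_TO_FP(Node, Result, Chain, DAG))
    return false;
  Results.push_back(Result);
  if (Node->isStrictFPOpcode()) // STRICT_UINT_TO_FP also carries a chain
    Results.push_back(Chain);
  return true;
}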
4420 
4421  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
4422  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
4423 
4424  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
4425  /// \param N Node to expand
4426  /// \returns The expansion result
4427  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
4428 
4429  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
4430  /// vector nodes can only succeed if all operations are legal/custom.
4431  /// \param N Node to expand
4432  /// \param Result output after conversion
4433  /// \returns True, if the expansion was successful, false otherwise
4434  bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4435 
4436  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
4437  /// vector nodes can only succeed if all operations are legal/custom.
4438  /// \param N Node to expand
4439  /// \param Result output after conversion
4440  /// \returns True, if the expansion was successful, false otherwise
4441  bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4442 
4443  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
4444  /// vector nodes can only succeed if all operations are legal/custom.
4445  /// \param N Node to expand
4446  /// \param Result output after conversion
4447  /// \returns True, if the expansion was successful, false otherwise
4448  bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4449 
4450  /// Expand ABS nodes. Expands vector/scalar ABS nodes;
4451  /// vector nodes can only succeed if all operations are legal/custom.
4452  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
4453  /// \param N Node to expand
4454  /// \param Result output after conversion
4455  /// \param IsNegative indicate negated abs
4456  /// \returns True, if the expansion was successful, false otherwise
4457  bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG,
4458  bool IsNegative = false) const;
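
// [Editor's sketch, not part of TargetLowering.h] The identity used by the
// expansion above, checked on a plain 8-bit integer: with Sign = x >> (BW-1)
// (arithmetic shift), abs(x) == (x + Sign) ^ Sign.
#include <cassert>
#include <cstdint>
int main() {
  int8_t X = -5;
  int8_t Sign = X >> 7;                     // all-ones for negative X, else 0
  int8_t Abs = (int8_t)((X + Sign) ^ Sign); // (-5 + -1) ^ -1 == 5
  assert(Abs == 5);
  return 0;
}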
4459 
4460  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
4461  /// scalar types. Returns SDValue() if the expansion fails.
4462  /// \param N Node to expand
4463  /// \returns The expansion result or SDValue() if it fails.
4464  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;
4465 
4466  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
4467  /// Returns SDValue() if the expansion fails.
4468  /// \param N Node to expand
4469  /// \returns The expansion result or SDValue() if it fails.
4470  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
4471 
4472  /// Turn load of vector type into a load of the individual elements.
4473  /// \param LD load to expand
4474  /// \returns BUILD_VECTOR and TokenFactor nodes.
4475  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
4476  SelectionDAG &DAG) const;
4477 
4478  /// Turn a store of a vector type into stores of the individual elements.
4479  /// \param ST Store with a vector value type
4480  /// \returns TokenFactor of the individual store chains.
4481  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
4482 
4483  /// Expands an unaligned load to 2 half-size loads for an integer, and
4484  /// possibly more for vectors.
4485  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
4486  SelectionDAG &DAG) const;
4487 
4488  /// Expands an unaligned store to 2 half-size stores for integer values, and
4489  /// possibly more for vectors.
4490  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
4491 
4492  /// Increments memory address \p Addr according to the type of the value
4493  /// \p DataVT that should be stored. If the data is stored in compressed
4494  /// form, the memory address should be incremented according to the number of
4495  /// the stored elements. This number is equal to the number of '1' bits
4496  /// in the \p Mask.
4497  /// \p DataVT is a vector type. \p Mask is a vector value.
4498  /// \p DataVT and \p Mask have the same number of vector elements.
4499  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
4500  EVT DataVT, SelectionDAG &DAG,
4501  bool IsCompressedMemory) const;
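
// [Editor's sketch, not part of TargetLowering.h] Why the compressed case
// differs: only the masked-in lanes are written contiguously, so the address
// advances by popcount(Mask) * element-size rather than the full vector size.
// nextCompressedAddr is a hypothetical scalar model of that rule.
#include <bitset>
#include <cstdint>
static uint64_t nextCompressedAddr(uint64_t Addr, std::bitset<4> Mask,
                                   uint64_t EltBytes) {
  // e.g. Mask = 1011 stores three elements: Addr + 3 * EltBytes, not + 4.
  return Addr + Mask.count() * EltBytes;
}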
4502 
4503  /// Get a pointer to vector element \p Idx located in memory for a vector of
4504  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
4505  /// bounds the returned pointer is unspecified, but will be within the vector
4506  /// bounds.
4507  SDValue getVectorElementPointer(SDValue VecPtr, EVT VecVT,
4508  SDValue Index) const;
4509 
4510  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
4511  /// in memory for a vector of type \p VecVT starting at a base address of
4512  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
4513  /// returned pointer is unspecified, but the value returned will be such that
4514  /// the entire subvector would be within the vector bounds.
4515  SDValue getVectorSubVecPointer(SDValue VecPtr, EVT VecVT,
4516  EVT SubVecVT, SDValue Index) const;
4517 
4518  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
4519  /// method accepts integers as its arguments.
4520  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
4521 
4522  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
4523  /// method accepts integers as its arguments.
4524  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
4525 
4526  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
4527  /// method accepts integers as its arguments.
4528  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;
4529 
4530  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
4531  /// method accepts integers as its arguments.
4532  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
4533 
4534  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
4535  /// method accepts integers as its arguments.
4536  /// Note: This method may fail if the division could not be performed
4537  /// within the type. Clients must retry with a wider type if this happens.
4538  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
4539  SDValue LHS, SDValue RHS,
4540  unsigned Scale, SelectionDAG &DAG) const;
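
// [Editor's sketch, not part of TargetLowering.h] Why the division can fail
// within the type: with scale S the expansion computes (LHS << S) / RHS, so
// the shifted dividend may need a wider intermediate type. A simplified
// unsigned Q4.4 model (divQ44 is hypothetical):
#include <cassert>
#include <cstdint>
static uint8_t divQ44(uint8_t LHS, uint8_t RHS) {
  return (uint8_t)(((uint16_t)LHS << 4) / RHS); // widen before the pre-shift
}
int main() {
  assert(divQ44(0x28 /*2.5*/, 0x08 /*0.5*/) == 0x50 /*5.0*/);
  return 0;
}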
4541 
4542  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
4543  /// always succeeds and populates the Result and Overflow arguments.
4544  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
4545  SelectionDAG &DAG) const;
4546 
4547  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
4548  /// always succeeds and populates the Result and Overflow arguments.
4549  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
4550  SelectionDAG &DAG) const;
4551 
4552  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
4553  /// expansion was successful and populates the Result and Overflow arguments.
4554  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
4555  SelectionDAG &DAG) const;
4556 
4557  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
4558  /// only the first Count elements of the vector are used.