1 //===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that PPC uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
15 #define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
16 
17 #include "PPCInstrInfo.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/CallingConv.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/InlineAsm.h"
29 #include "llvm/IR/Metadata.h"
30 #include "llvm/IR/Type.h"
32 #include <utility>
33 
34 namespace llvm {
35 
36  namespace PPCISD {
37 
38  // When adding a NEW PPCISD node please add it to the correct position in
39  // the enum. The order of elements in this enum matters!
40  // Values that are added after this entry:
41  // STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
42  // are considered memory opcodes and are treated differently than entries
43  // that come before it. For example, ADD or MUL should be placed before
44  // the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come
45  // after it.
46  enum NodeType : unsigned {
47  // Start the numbering where the builtin ops and target ops leave off.
49 
50  /// FSEL - Traditional three-operand fsel node.
51  ///
53 
54  /// FCFID - The FCFID instruction, taking an f64 operand and producing
55  /// an f64 value containing the FP representation of the integer that
56  /// was temporarily in the f64 operand.
58 
59  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
60  /// unsigned integers and single-precision outputs.
62 
63  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
64  /// operand, producing an f64 value containing the integer representation
65  /// of that FP value.
67 
68  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
69  /// unsigned integers with round toward zero.
71 
72  /// Floating-point-to-integer conversion instructions
74 
75  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
76  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
78 
79  /// SExtVElems, takes an input vector of a smaller type and sign
80  /// extends to an output vector of a larger type.
82 
83  /// Reciprocal estimate instructions (unary FP ops).
85 
86  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
87  // three v4f32 operands and producing a v4f32 result.
89 
90  /// VPERM - The PPC VPERM Instruction.
91  ///
93 
94  /// XXSPLT - The PPC VSX splat instructions
95  ///
97 
98  /// VECINSERT - The PPC vector insert instruction
99  ///
101 
102  /// XXREVERSE - The PPC VSX reverse instruction
103  ///
105 
106  /// VECSHL - The PPC vector shift left instruction
107  ///
109 
110  /// XXPERMDI - The PPC XXPERMDI instruction
111  ///
113 
114  /// The CMPB instruction (takes two operands of i32 or i64).
116 
117  /// Hi/Lo - These represent the high and low 16-bit parts of a global
118  /// address respectively. These nodes have two operands, the first of
119  /// which must be a TargetGlobalAddress, and the second of which must be a
120  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
121  /// though these are usually folded into other nodes.
122  Hi, Lo,
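 /// Illustrative sketch (not taken from this file): for a global G at a
 /// constant offset C, a naive selection of Hi/Lo on 32-bit targets could be
 ///   lis  r3, (G+C)@ha      ; Hi part
 ///   addi r3, r3, (G+C)@l   ; Lo part folded into the add
 /// The exact relocations and base registers depend on the ABI and code model.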
123 
124  /// The following two target-specific nodes are used for calls through
125  /// function pointers in the 64-bit SVR4 ABI.
126 
127  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
128  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
129  /// compute an allocation on the stack.
131 
132  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
133  /// compute an offset from native SP to the address of the most recent
134  /// dynamic alloca.
136 
137  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
138  /// at function entry, used for PIC code.
140 
141  /// These nodes represent PPC shifts.
142  ///
143  /// For scalar types, only the last `n + 1` bits of the shift amounts
144  /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
145  /// for exact behaviors.
146  ///
147  /// For vector types, only the last n bits are used. See vsld.
149 
150  /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
151  /// word and shift left immediate.
153 
154  /// The combination of sra[wd]i and addze used to implement signed
155  /// integer division by a power of 2. The first operand is the dividend,
156  /// and the second is the constant shift amount (representing the
157  /// divisor).
159 
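 /// A hedged example of the sequence this node stands for (not quoted from
 /// the sources): signed i32 division by 4, with the dividend assumed in r3:
 ///   srawi r3, r3, 2   ; shift right algebraic; CA records lost nonzero bits
 ///   addze r3, r3      ; add the carry to round the negative case toward zero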
160  /// CALL - A direct function call.
161  /// CALL_NOP is a call with the special NOP which follows 64-bit
162  /// SVR4 calls and 32-bit/64-bit AIX calls.
164 
165  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
166  /// MTCTR instruction.
168 
169  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
170  /// BCTRL instruction.
172 
173  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
174  /// instruction and the TOC reload required on SVR4 PPC64.
176 
177  /// Return with a flag operand, matched by 'blr'
179 
180  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
181  /// This copies the bits corresponding to the specified CRREG into the
182  /// resultant GPR. Bits corresponding to other CR regs are undefined.
184 
185  /// Direct move from a VSX register to a GPR
187 
188  /// Direct move from a GPR to a VSX register (algebraic)
190 
191  /// Direct move from a GPR to a VSX register (zero)
193 
194  /// Direct move of 2 consecutive GPR to a VSX register.
196 
197  /// Extract a subvector from signed integer vector and convert to FP.
198  /// It is primarily used to convert a (widened) illegal integer vector
199  /// type to a legal floating point vector type.
200  /// For example v2i32 -> widened to v4i32 -> v2f64
202 
203  /// Extract a subvector from unsigned integer vector and convert to FP.
204  /// As with SINT_VEC_TO_FP, used for converting illegal types.
206 
207  // FIXME: Remove these once the ANDI glue bug is fixed:
208  /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
209  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
210  /// implement truncation of i32 or i64 to i1.
212 
213  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
214  // target (returns (Lo, Hi)). It takes a chain operand.
216 
217  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
219 
220  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
222 
223  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
224  /// instructions. For lack of a better number, we use the opcode number
225  /// encoding for the OPC field to identify the compare. For example, 838
226  /// is VCMPGTSH.
228 
229  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
230  /// altivec VCMP*o instructions. For lack of a better number, we use the
231  /// opcode number encoding for the OPC field to identify the compare. For
232  /// example, 838 is VCMPGTSH.
234 
235  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
236  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
237  /// condition register to branch on, OPC is the branch opcode to use (e.g.
238  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
239  /// an optional input flag argument.
241 
242  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
243  /// loops.
245 
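 /// Illustrative sketch of a counter-based loop (assumed, not from this file):
 ///   mtctr r3          ; trip count into the CTR register
 /// loop:
 ///   ...loop body...
 ///   bdnz  loop        ; decrement CTR and branch while it is nonzero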
246  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
247  /// towards zero. Used only as part of the long double-to-int
248  /// conversion sequence.
250 
251  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
253 
254  /// TC_RETURN - A tail call return.
255  /// operand #0 chain
256  /// operand #1 callee (register or absolute)
257  /// operand #2 stack adjustment
258  /// operand #3 optional in flag
260 
261  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
264 
265  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
266  /// for non-position independent code on PPC32.
268 
269  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
270  /// local dynamic TLS and position independent code on PPC32.
272 
273  /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
274  /// TLS model, produces an ADDIS8 instruction that adds the GOT
275  /// base to sym\@got\@tprel\@ha.
277 
278  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
279  /// TLS model, produces a LD instruction with base register G8RReg
280  /// and offset sym\@got\@tprel\@l. This completes the addition that
281  /// finds the offset of "sym" relative to the thread pointer.
283 
284  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
285  /// model, produces an ADD instruction that adds the contents of
286  /// G8RReg to the thread pointer. Symbol contains a relocation
287  /// sym\@tls which is to be replaced by the thread pointer and
288  /// identifies to the linker that the instruction is part of a
289  /// TLS sequence.
291 
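 /// Taken together, an initial-exec access of a thread-local "sym" is
 /// conventionally emitted roughly as follows (illustrative, assuming 64-bit
 /// ELF conventions; exact registers may differ):
 ///   addis r4, r2, sym@got@tprel@ha
 ///   ld    r4, sym@got@tprel@l(r4)
 ///   add   r3, r4, sym@tls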
292  /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
293  /// model, produces an ADDIS8 instruction that adds the GOT base
294  /// register to sym\@got\@tlsgd\@ha.
296 
297  /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
298  /// model, produces an ADDI8 instruction that adds G8RReg to
299  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
300  /// ADDIS_TLSGD_L_ADDR until after register assignment.
302 
303  /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
304  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
305  /// ADDIS_TLSGD_L_ADDR until after register assignment.
307 
308  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
309  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
310  /// register assignment.
312 
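 /// For orientation, the general-dynamic nodes above correspond to a sequence
 /// along these lines (illustrative only, assuming 64-bit ELF):
 ///   addis r3, r2, sym@got@tlsgd@ha
 ///   addi  r3, r3, sym@got@tlsgd@l
 ///   bl    __tls_get_addr(sym@tlsgd)
 ///   nop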
313  /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
314  /// model, produces an ADDIS8 instruction that adds the GOT base
315  /// register to sym\@got\@tlsld\@ha.
317 
318  /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
319  /// model, produces an ADDI8 instruction that adds G8RReg to
320  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
321  /// ADDIS_TLSLD_L_ADDR until after register assignment.
323 
324  /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
325  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
326  /// ADDIS_TLSLD_L_ADDR until after register assignment.
328 
329  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
330  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
331  /// following register assignment.
333 
334  /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
335  /// model, produces an ADDIS8 instruction that adds X3 to
336  /// sym\@dtprel\@ha.
338 
339  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
340  /// model, produces an ADDI8 instruction that adds G8RReg to
341  /// sym\@got\@dtprel\@l.
343 
344  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
345  /// during instruction selection to optimize a BUILD_VECTOR into
346  /// operations on splats. This is necessary to avoid losing these
347  /// optimizations due to constant folding.
349 
350  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
351  /// operand identifies the operating system entry point.
352  SC,
353 
354  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
356 
357  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
358  /// history rolling buffer entry.
360 
361  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
363 
364  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
365  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
366  /// or stxvd2x instruction. The chain is necessary because the
367  /// sequence replaces a load and needs to provide the same number
368  /// of outputs.
370 
371  /// An SDNode for swaps that are not associated with any loads/stores
372  /// and thereby have no chain.
374 
375  /// An SDNode for Power9 vector absolute value difference.
376  /// operand #0 vector
377  /// operand #1 vector
378  /// operand #2 constant i32 0 or 1, to indicate whether it needs to patch
379  /// the most significant bit for signed i32
380  ///
381  /// Power9 VABSD* instructions are designed to support unsigned integer
382  /// vectors (byte/halfword/word), if we want to make use of them for signed
383  /// integer vectors, we have to flip their sign bits first. Flipping the sign
384  /// bit of a byte/halfword integer vector would be inefficient, but for a word
385  /// integer vector we can leverage XVNEGSP to do it efficiently. E.g.:
386  /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
387  /// => VABSDUW((XVNEGSP a), (XVNEGSP b))
389 
390  /// QVFPERM = This corresponds to the QPX qvfperm instruction.
392 
393  /// QVGPCI = This corresponds to the QPX qvgpci instruction.
395 
396  /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
398 
399  /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
401 
402  /// QBFLT = Access the underlying QPX floating-point boolean
403  /// representation.
405 
406  /// Custom extend v4f32 to v2f64.
408 
409  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
410  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
411  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
412  /// i32.
414 
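 /// A minimal sketch of what STBRX selects to (assumed, not from this file),
 /// for an i32 byte-swapping store of r4 to the address in r3:
 ///   stwbrx r4, 0, r3   ; store word byte-reversed (indexed form)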
415  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
416  /// byte-swapping load instruction. It loads "Type" bits, byte-swaps them,
417  /// then puts them in the bottom bits of the GPRC. Type can be either i16
418  /// or i32.
420 
421  /// STFIWX - The STFIWX instruction. The first operand is an input token
422  /// chain, then an f64 value to store, then an address to store it to.
424 
425  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
426  /// load which sign-extends from a 32-bit integer value into the
427  /// destination 64-bit register.
429 
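 /// A common use, sketched as an assumption rather than quoted from the
 /// sources, is sint_to_fp of an i32 held in memory at the address in r3:
 ///   lfiwax f1, 0, r3   ; load and sign-extend the i32 into an FPR
 ///   fcfid  f1, f1      ; convert the 64-bit integer to double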
430  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
431  /// load which zero-extends from a 32-bit integer value into the
432  /// destination 64-bit register.
434 
435  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
436  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
437  /// This can be used for converting loaded integers to floating point.
439 
440  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
441  /// chain, then an f64 value to store, then an address to store it to,
442  /// followed by a byte-width for the store.
444 
445  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
446  /// Maps directly to an lxvd2x instruction that will be followed by
447  /// an xxswapd.
449 
450  /// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a
451  /// v2f32 value into the lower half of a VSR register.
453 
454  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
455  /// Maps directly to an stxvd2x instruction that will be preceded by
456  /// an xxswapd.
458 
459  /// Store scalar integers from VSR.
461 
462  /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
463  /// The 4xf32 load used for v4i1 constants.
465 
466  /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
467  /// except they ensure that the compare input is zero-extended for
468  /// sub-word versions because the atomic loads zero-extend.
470 
471  /// GPRC = TOC_ENTRY GA, TOC
472  /// Loads the entry for GA from the TOC, where the TOC base is given by
473  /// the last operand.
475  };
476 
477  } // end namespace PPCISD
478 
479  /// Define some predicates that are used for node matching.
480  namespace PPC {
481 
482  /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
483  /// VPKUHUM instruction.
484  bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
485  SelectionDAG &DAG);
486 
487  /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
488  /// VPKUWUM instruction.
489  bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
490  SelectionDAG &DAG);
491 
492  /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
493  /// VPKUDUM instruction.
494  bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
495  SelectionDAG &DAG);
496 
497  /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
498  /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
499  bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
500  unsigned ShuffleKind, SelectionDAG &DAG);
501 
502  /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
503  /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
504  bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
505  unsigned ShuffleKind, SelectionDAG &DAG);
506 
507  /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
508  /// a VMRGEW or VMRGOW instruction
509  bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
510  unsigned ShuffleKind, SelectionDAG &DAG);
511  /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
512  /// for a XXSLDWI instruction.
513  bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
514  bool &Swap, bool IsLE);
515 
516  /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
517  /// for a XXBRH instruction.
519 
520  /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
521  /// for a XXBRW instruction.
523 
524  /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
525  /// for a XXBRD instruction.
527 
528  /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
529  /// for a XXBRQ instruction.
531 
532  /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
533  /// for a XXPERMDI instruction.
534  bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
535  bool &Swap, bool IsLE);
536 
537  /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
538  /// shift amount, otherwise return -1.
539  int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
540  SelectionDAG &DAG);
541 
542  /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
543  /// specifies a splat of a single element that is suitable for input to
544  /// VSPLTB/VSPLTH/VSPLTW.
545  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
546 
547  /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
548  /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
549  /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
550  /// vector into the other. This function will also set a couple of
551  /// output parameters for how much the source vector needs to be shifted and
552  /// what byte number needs to be specified for the instruction to put the
553  /// element in the desired location of the target vector.
554  bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
555  unsigned &InsertAtByte, bool &Swap, bool IsLE);
556 
557  /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
558  /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
559  unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
560 
561  /// get_VSPLTI_elt - If this is a build_vector of constants which can be
562  /// formed by using a vspltis[bhw] instruction of the specified element
563  /// size, return the constant being splatted. The ByteSize field indicates
564  /// the number of bytes of each element [124] -> [bhw].
565  SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
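 /// For example (illustrative, not part of the original header), a
 /// build_vector of sixteen i8 constants equal to 5 can be matched to
 ///   vspltisb v2, 5
 /// and get_VSPLTI_elt would return the splatted constant for ByteSize 1.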
566 
567  /// If this is a qvaligni shuffle mask, return the shift
568  /// amount, otherwise return -1.
570 
571  } // end namespace PPC
572 
573  class PPCTargetLowering : public TargetLowering {
574  const PPCSubtarget &Subtarget;
575 
576  public:
577  explicit PPCTargetLowering(const PPCTargetMachine &TM,
578  const PPCSubtarget &STI);
579 
580  /// getTargetNodeName() - This method returns the name of a target specific
581  /// DAG node.
582  const char *getTargetNodeName(unsigned Opcode) const override;
583 
584  bool isSelectSupported(SelectSupportKind Kind) const override {
585  // PowerPC does not support scalar condition selects on vectors.
586  return (Kind != SelectSupportKind::ScalarCondVectorVal);
587  }
588 
589  /// getPreferredVectorAction - The code we generate when vector types are
590  /// legalized by promoting the integer element type is often much worse
591  /// than code we generate if we widen the type for applicable vector types.
592  /// The issue with promoting is that the vector is scalarized, the individual
593  /// elements are promoted, and then the vector is rebuilt. So, say we load a
594  /// pair of v4i8's and shuffle them. This will turn into a mess of 8 extending
595  /// loads, moves back into VSRs (or memory ops if we don't have moves) and
596  /// then the VPERM for the shuffle. All in all a very slow sequence.
597  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
598  const override {
599  if (VT.getScalarSizeInBits() % 8 == 0)
600  return TypeWidenVector;
601  return TargetLoweringBase::getPreferredVectorAction(VT);
602  }
603 
604  bool useSoftFloat() const override;
605 
606  bool hasSPE() const;
607 
608  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
609  return MVT::i32;
610  }
611 
612  bool isCheapToSpeculateCttz() const override {
613  return true;
614  }
615 
616  bool isCheapToSpeculateCtlz() const override {
617  return true;
618  }
619 
620  bool isCtlzFast() const override {
621  return true;
622  }
623 
624  bool hasAndNotCompare(SDValue) const override {
625  return true;
626  }
627 
628  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
629  return VT.isScalarInteger();
630  }
631 
632  bool supportSplitCSR(MachineFunction *MF) const override {
633  return
634  MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
635  MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
636  }
637 
638  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
639 
640  void insertCopiesSplitCSR(
641  MachineBasicBlock *Entry,
642  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
643 
644  /// getSetCCResultType - Return the ISD::SETCC ValueType
645  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
646  EVT VT) const override;
647 
648  /// Return true if the target always benefits from combining into FMA for a
649  /// given value type. This must typically return false on targets where FMA
650  /// takes more cycles to execute than FADD.
651  bool enableAggressiveFMAFusion(EVT VT) const override;
652 
653  /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
654  /// offset pointer and addressing mode by reference, if the node's address
655  /// can be legally represented as a pre-indexed load / store address.
656  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
657  SDValue &Offset,
658  ISD::MemIndexedMode &AM,
659  SelectionDAG &DAG) const override;
660 
661  /// SelectAddressRegReg - Given the specified address, check to see if it
662  /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
663  /// is non-zero, only accept a displacement which is not suitable for [r+imm].
664  /// Returns false if it can be represented by [r+imm], which is preferred.
665  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
666  SelectionDAG &DAG,
667  unsigned EncodingAlignment = 0) const;
668 
669  /// SelectAddressRegImm - Returns true if the address N can be represented
670  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
671  /// is not better represented as reg+reg. If \p EncodingAlignment is
672  /// non-zero, only accept displacements suitable for instruction encoding
673  /// requirements, i.e. multiples of 4 for DS form.
674  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
675  SelectionDAG &DAG,
676  unsigned EncodingAlignment) const;
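 /// As a sketch of the distinction (assumed, not from this file): an address
 /// like base+16 can use the D-form "lwz r3, 16(r4)", while DS-form
 /// instructions such as "ld" need the displacement to be a multiple of 4,
 /// which is what a non-zero EncodingAlignment enforces.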
677 
678  /// SelectAddressRegRegOnly - Given the specified address, force it to be
679  /// represented as an indexed [r+r] operation.
680  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
681  SelectionDAG &DAG) const;
682 
683  Sched::Preference getSchedulingPreference(SDNode *N) const override;
684 
685  /// LowerOperation - Provide custom lowering hooks for some operations.
686  ///
687  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
688 
689  /// ReplaceNodeResults - Replace the results of node with an illegal result
690  /// type with new values built out of custom code.
691  ///
692  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
693  SelectionDAG &DAG) const override;
694 
695  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
696  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
697 
698  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
699 
700  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
701  SmallVectorImpl<SDNode *> &Created) const override;
702 
703  unsigned getRegisterByName(const char* RegName, EVT VT,
704  SelectionDAG &DAG) const override;
705 
706  void computeKnownBitsForTargetNode(const SDValue Op,
707  KnownBits &Known,
708  const APInt &DemandedElts,
709  const SelectionDAG &DAG,
710  unsigned Depth = 0) const override;
711 
712  unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
713 
714  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
715  return true;
716  }
717 
718  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
719  AtomicOrdering Ord) const override;
720  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
721  AtomicOrdering Ord) const override;
722 
723  MachineBasicBlock *
724  EmitInstrWithCustomInserter(MachineInstr &MI,
725  MachineBasicBlock *MBB) const override;
726  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
727  MachineBasicBlock *MBB,
728  unsigned AtomicSize,
729  unsigned BinOpcode,
730  unsigned CmpOpcode = 0,
731  unsigned CmpPred = 0) const;
732  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
733  MachineBasicBlock *MBB,
734  bool is8bit,
735  unsigned Opcode,
736  unsigned CmpOpcode = 0,
737  unsigned CmpPred = 0) const;
738 
739  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
740  MachineBasicBlock *MBB) const;
741 
742  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
743  MachineBasicBlock *MBB) const;
744 
745  ConstraintType getConstraintType(StringRef Constraint) const override;
746 
747  /// Examine constraint string and operand type and determine a weight value.
748  /// The operand object must already have been set up with the operand type.
749  ConstraintWeight getSingleConstraintMatchWeight(
750  AsmOperandInfo &info, const char *constraint) const override;
751 
752  std::pair<unsigned, const TargetRegisterClass *>
753  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
754  StringRef Constraint, MVT VT) const override;
755 
756  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
757  /// function arguments in the caller parameter area. This is the actual
758  /// alignment, not its logarithm.
759  unsigned getByValTypeAlignment(Type *Ty,
760  const DataLayout &DL) const override;
761 
762  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
763  /// vector. If it is invalid, don't add anything to Ops.
764  void LowerAsmOperandForConstraint(SDValue Op,
765  std::string &Constraint,
766  std::vector<SDValue> &Ops,
767  SelectionDAG &DAG) const override;
768 
769  unsigned
770  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
771  if (ConstraintCode == "es")
772  return InlineAsm::Constraint_es;
773  else if (ConstraintCode == "o")
774  return InlineAsm::Constraint_o;
775  else if (ConstraintCode == "Q")
776  return InlineAsm::Constraint_Q;
777  else if (ConstraintCode == "Z")
778  return InlineAsm::Constraint_Z;
779  else if (ConstraintCode == "Zy")
780  return InlineAsm::Constraint_Zy;
781  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
782  }
783 
784  /// isLegalAddressingMode - Return true if the addressing mode represented
785  /// by AM is legal for this target, for a load/store of the specified type.
786  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
787  Type *Ty, unsigned AS,
788  Instruction *I = nullptr) const override;
789 
790  /// isLegalICmpImmediate - Return true if the specified immediate is legal
791  /// icmp immediate, that is the target has icmp instructions which can
792  /// compare a register against the immediate without having to materialize
793  /// the immediate into a register.
794  bool isLegalICmpImmediate(int64_t Imm) const override;
795 
796  /// isLegalAddImmediate - Return true if the specified immediate is legal
797  /// add immediate, that is the target has add instructions which can
798  /// add a register and the immediate without having to materialize
799  /// the immediate into a register.
800  bool isLegalAddImmediate(int64_t Imm) const override;
801 
802  /// isTruncateFree - Return true if it's free to truncate a value of
803  /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
804  /// register X1 to i32 by referencing its sub-register R1.
805  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
806  bool isTruncateFree(EVT VT1, EVT VT2) const override;
807 
808  bool isZExtFree(SDValue Val, EVT VT2) const override;
809 
810  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
811 
812  /// Returns true if it is beneficial to convert a load of a constant
813  /// to just the constant itself.
814  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
815  Type *Ty) const override;
816 
817  bool convertSelectOfConstantsToMath(EVT VT) const override {
818  return true;
819  }
820 
821  // Returns true if the address of the global is stored in TOC entry.
822  bool isAccessedAsGotIndirect(SDValue N) const;
823 
824  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
825 
826  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
827  const CallInst &I,
828  MachineFunction &MF,
829  unsigned Intrinsic) const override;
830 
831  /// getOptimalMemOpType - Returns the target specific optimal type for load
832  /// and store operations as a result of memset, memcpy, and memmove
833  /// lowering. If DstAlign is zero, it is safe to assume the destination
834  /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
835  /// means there is no need to check it against an alignment requirement,
836  /// probably because the source does not need to be loaded. If 'IsMemset' is
837  /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
838  /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
839  /// source is constant so it does not need to be loaded.
840  /// It returns EVT::Other if the type should be determined using generic
841  /// target-independent logic.
842  EVT
843  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
844  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
845  const AttributeList &FuncAttributes) const override;
846 
847  /// Is unaligned memory access allowed for the given type, and is it fast
848  /// relative to software emulation.
849  bool allowsMisalignedMemoryAccesses(
850  EVT VT, unsigned AddrSpace, unsigned Align = 1,
852  bool *Fast = nullptr) const override;
853 
854  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
855  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
856  /// expanded to FMAs when this method returns true, otherwise fmuladd is
857  /// expanded to fmul + fadd.
858  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
859 
860  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
861 
862  // Should we expand the build vector with shuffles?
863  bool
864  shouldExpandBuildVectorWithShuffles(EVT VT,
865  unsigned DefinedValues) const override;
866 
867  /// createFastISel - This method returns a target-specific FastISel object,
868  /// or null if the target does not support "fast" instruction selection.
869  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
870  const TargetLibraryInfo *LibInfo) const override;
871 
872  /// Returns true if an argument of type Ty needs to be passed in a
873  /// contiguous block of registers in calling convention CallConv.
874  bool functionArgumentNeedsConsecutiveRegisters(
875  Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
876  // We support any array type as "consecutive" block in the parameter
877  // save area. The element type defines the alignment requirement and
878  // whether the argument should go in GPRs, FPRs, or VRs if available.
879  //
880  // Note that clang uses this capability both to implement the ELFv2
881  // homogeneous float/vector aggregate ABI, and to avoid having to use
882  // "byval" when passing aggregates that might fully fit in registers.
883  return Ty->isArrayTy();
884  }
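 // A hedged illustration of the array case described above (hypothetical
 // type, not from the original source):
 //   struct HFA { float v[4]; };   // lowered by clang to [4 x float]
 // Under ELFv2 the four floats are passed in consecutive FPRs when available.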
885 
886  /// If a physical register, this returns the register that receives the
887  /// exception address on entry to an EH pad.
888  unsigned
889  getExceptionPointerRegister(const Constant *PersonalityFn) const override;
890 
891  /// If a physical register, this returns the register that receives the
892  /// exception typeid on entry to a landing pad.
893  unsigned
894  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
895 
896  /// Override to support customized stack guard loading.
897  bool useLoadStackGuardNode() const override;
898  void insertSSPDeclarations(Module &M) const override;
899 
900  bool isFPImmLegal(const APFloat &Imm, EVT VT,
901  bool ForCodeSize) const override;
902 
903  unsigned getJumpTableEncoding() const override;
904  bool isJumpTableRelative() const override;
905  SDValue getPICJumpTableRelocBase(SDValue Table,
906  SelectionDAG &DAG) const override;
907  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
908  unsigned JTI,
909  MCContext &Ctx) const override;
910 
911  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
912  CallingConv:: ID CC,
913  EVT VT) const override;
914 
915  MVT getRegisterTypeForCallingConv(LLVMContext &Context,
916  CallingConv:: ID CC,
917  EVT VT) const override;
918 
919  private:
920  struct ReuseLoadInfo {
921  SDValue Ptr;
922  SDValue Chain;
923  SDValue ResChain;
924  MachinePointerInfo MPI;
925  bool IsDereferenceable = false;
926  bool IsInvariant = false;
927  unsigned Alignment = 0;
928  AAMDNodes AAInfo;
929  const MDNode *Ranges = nullptr;
930 
931  ReuseLoadInfo() = default;
932 
933  MachineMemOperand::Flags MMOFlags() const {
934  MachineMemOperand::Flags F = MachineMemOperand::MONone;
935  if (IsDereferenceable)
936  F |= MachineMemOperand::MODereferenceable;
937  if (IsInvariant)
938  F |= MachineMemOperand::MOInvariant;
939  return F;
940  }
941  };
942 
943  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
944  // Addrspacecasts are always noops.
945  return true;
946  }
947 
948  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
949  SelectionDAG &DAG,
950  ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
951  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
952  SelectionDAG &DAG) const;
953 
954  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
955  SelectionDAG &DAG, const SDLoc &dl) const;
956  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
957  const SDLoc &dl) const;
958 
959  bool directMoveIsProfitable(const SDValue &Op) const;
960  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
961  const SDLoc &dl) const;
962 
963  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
964  const SDLoc &dl) const;
965 
966  SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;
967 
968  SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
969  SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
970 
971  bool
972  IsEligibleForTailCallOptimization(SDValue Callee,
973  CallingConv::ID CalleeCC,
974  bool isVarArg,
976  SelectionDAG& DAG) const;
977 
978  bool
979  IsEligibleForTailCallOptimization_64SVR4(
980  SDValue Callee,
981  CallingConv::ID CalleeCC,
983  bool isVarArg,
986  SelectionDAG& DAG) const;
987 
988  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
989  SDValue Chain, SDValue &LROpOut,
990  SDValue &FPOpOut,
991  const SDLoc &dl) const;
992 
994  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
995  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
996  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
997  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
998  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
999  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
1000  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
1001  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
1003  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
1004  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
1005  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
1006  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
1007  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
1009  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
1010  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
1011  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
1012  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
1013  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
1014  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1015  const SDLoc &dl) const;
1016  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1017  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
1018  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1019  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1020  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
1021  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
1025  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1026  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
1027  SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
1028  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
1029  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
1031  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
1032  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
1033  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
1034  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
1035 
1036  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
1037  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
1038 
1039  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1040  CallingConv::ID CallConv, bool isVarArg,
1041  const SmallVectorImpl<ISD::InputArg> &Ins,
1042  const SDLoc &dl, SelectionDAG &DAG,
1043  SmallVectorImpl<SDValue> &InVals) const;
1044  SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
1045  bool isTailCall, bool isVarArg, bool isPatchPoint,
1046  bool hasNest, SelectionDAG &DAG,
1047  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
1048  SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
1049  SDValue &Callee, int SPDiff, unsigned NumBytes,
1050  const SmallVectorImpl<ISD::InputArg> &Ins,
1051  SmallVectorImpl<SDValue> &InVals,
1052  ImmutableCallSite CS) const;
1053 
1054  SDValue
1055  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1056  const SmallVectorImpl<ISD::InputArg> &Ins,
1057  const SDLoc &dl, SelectionDAG &DAG,
1058  SmallVectorImpl<SDValue> &InVals) const override;
1059 
1060  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
1061  SmallVectorImpl<SDValue> &InVals) const override;
1062 
1063  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1064  bool isVarArg,
1065  const SmallVectorImpl<ISD::OutputArg> &Outs,
1066  LLVMContext &Context) const override;
1067 
1068  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1069  const SmallVectorImpl<ISD::OutputArg> &Outs,
1070  const SmallVectorImpl<SDValue> &OutVals,
1071  const SDLoc &dl, SelectionDAG &DAG) const override;
1072 
1073  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
1074  SelectionDAG &DAG, SDValue ArgVal,
1075  const SDLoc &dl) const;
1076 
1077  SDValue LowerFormalArguments_Darwin(
1078  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1079  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1080  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1081  SDValue LowerFormalArguments_64SVR4(
1082  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1083  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1084  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1085  SDValue LowerFormalArguments_32SVR4(
1086  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1087  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1088  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1089 
1090  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
1091  SDValue CallSeqStart,
1092  ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1093  const SDLoc &dl) const;
1094 
1095  SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
1096  CallingConv::ID CallConv, bool isVarArg,
1097  bool isTailCall, bool isPatchPoint,
1098  const SmallVectorImpl<ISD::OutputArg> &Outs,
1099  const SmallVectorImpl<SDValue> &OutVals,
1100  const SmallVectorImpl<ISD::InputArg> &Ins,
1101  const SDLoc &dl, SelectionDAG &DAG,
1102  SmallVectorImpl<SDValue> &InVals,
1103  ImmutableCallSite CS) const;
1104  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
1105  CallingConv::ID CallConv, bool isVarArg,
1106  bool isTailCall, bool isPatchPoint,
1107  const SmallVectorImpl<ISD::OutputArg> &Outs,
1108  const SmallVectorImpl<SDValue> &OutVals,
1109  const SmallVectorImpl<ISD::InputArg> &Ins,
1110  const SDLoc &dl, SelectionDAG &DAG,
1111  SmallVectorImpl<SDValue> &InVals,
1112  ImmutableCallSite CS) const;
1113  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
1114  CallingConv::ID CallConv, bool isVarArg,
1115  bool isTailCall, bool isPatchPoint,
1116  const SmallVectorImpl<ISD::OutputArg> &Outs,
1117  const SmallVectorImpl<SDValue> &OutVals,
1118  const SmallVectorImpl<ISD::InputArg> &Ins,
1119  const SDLoc &dl, SelectionDAG &DAG,
1120  SmallVectorImpl<SDValue> &InVals,
1121  ImmutableCallSite CS) const;
1122  SDValue LowerCall_AIX(SDValue Chain, SDValue Callee,
1123  CallingConv::ID CallConv, bool isVarArg,
1124  bool isTailCall, bool isPatchPoint,
1125  const SmallVectorImpl<ISD::OutputArg> &Outs,
1126  const SmallVectorImpl<SDValue> &OutVals,
1127  const SmallVectorImpl<ISD::InputArg> &Ins,
1128  const SDLoc &dl, SelectionDAG &DAG,
1129  SmallVectorImpl<SDValue> &InVals,
1130  ImmutableCallSite CS) const;
1131 
1132  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
1133  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
1134  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
1135 
1136  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
1137  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
1138  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
1139  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
1140  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
1141  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
1142  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
1143  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
1144  SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
1145  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
1146  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
1147  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
1148  SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
1149  SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
1150 
1151  /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
1152  /// SETCC with integer subtraction when (1) there is a legal way of doing it
1153  /// and (2) keeping the result of the comparison in a GPR has a performance benefit.
1154  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;
1155 
1156  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1157  int &RefinementSteps, bool &UseOneConstNR,
1158  bool Reciprocal) const override;
1159  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1160  int &RefinementSteps) const override;
1161  unsigned combineRepeatedFPDivisors() const override;
1162 
1163  SDValue
1164  combineElementTruncationToVectorTruncation(SDNode *N,
1165  DAGCombinerInfo &DCI) const;
1166 
1167  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
1168  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
1169  /// essentially any shuffle of v8i16 vectors that just inserts one element
1170  /// from one vector into the other.
1171  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1172 
1173  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
1174  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
1175  /// essentially v16i8 vector version of VINSERTH.
1176  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1177 
1178  // Return whether the call instruction can potentially be optimized to a
1179  // tail call. This will cause the optimizers to attempt to move or
1180  // duplicate return instructions to help enable tail call optimizations.
1181  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
1182  bool hasBitPreservingFPLogic(EVT VT) const override;
1183  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
1184  }; // end class PPCTargetLowering
1185 
1186  namespace PPC {
1187 
1188  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
1189  const TargetLibraryInfo *LibInfo);
1190 
1191  } // end namespace PPC
1192 
1193  bool isIntS16Immediate(SDNode *N, int16_t &Imm);
1194  bool isIntS16Immediate(SDValue Op, int16_t &Imm);
1195 
1196 } // end namespace llvm
1197 
1198 #endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:913
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
bool isSelectSupported(SelectSupportKind Kind) const override
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
Return with a flag operand, matched by &#39;blr&#39;.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
getPreferredVectorAction - The code we generate when vector types are legalized by promoting the inte...
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
LLVMContext & Context
QVFPERM = This corresponds to the QPX qvfperm instruction.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
GPRC = address of GLOBAL_OFFSET_TABLE.
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:145
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction...
Function Alias Analysis Results
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:323
bool hasAndNotCompare(SDValue) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
unsigned const TargetRegisterInfo * TRI
Metadata node.
Definition: Metadata.h:863
F(f)
bool convertSelectOfConstantsToMath(EVT VT) const override
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
CALL - A direct function call.
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
Floating-point-to-interger conversion instructions.
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:742
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
This file contains the simple types necessary to represent the attributes associated with functions a...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
Direct move from a GPR to a VSX register (algebraic)
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
QVALIGNI = This corresponds to the QPX qvaligni instruction.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
Context object for machine code objects.
Definition: MCContext.h:62
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
An SDNode for Power9 vector absolute value difference.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
This contains information for each constraint that we are lowering.
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
FSEL - Traditional three-operand fsel node.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
static Value * LowerBSWAP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower bswap of V before the specified instruction IP.
unsigned getScalarSizeInBits() const
This is an important base class in LLVM.
Definition: Constant.h:41
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
SExtVElems, takes an input vector of a smaller type and sign extends to an output vector of a larger ...
VECINSERT - The PPC vector insert instruction.
Direct move from a VSX register to a GPR.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:970
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the specified isSplatShuffleMask...
STFIWX - The STFIWX instruction.
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
Store scalar integers from VSR.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
QVESPLATI = This corresponds to the QPX qvesplati instruction.
Common code between 32-bit and 64-bit PowerPC targets.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
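As an illustrative fragment (assumed, not taken from the header), the -1 sentinel makes the helper easy to use directly in a selection check; SVN and DAG are assumed to be in scope, and ShuffleKind 0 is used only as an example value.
// Sketch only: a negative result means the mask is not a vsldoi pattern;
// otherwise SH is the byte shift amount the instruction would encode.
int SH = PPC::isVSLDOIShuffleMask(SVN, /*ShuffleKind=*/0, DAG);
if (SH >= 0) {
  // The shuffle can be matched as a single VSLDOI with shift amount SH.
}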
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
Extended Value Type.
Definition: ValueTypes.h:33
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
This class contains a discriminated union of information about pointers in memory operands...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
Extract a subvector from an unsigned integer vector and convert it to FP.
QBFLT = Access the underlying QPX floating-point boolean representation.
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate...
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
Custom extend v4f32 to v2f64.
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
XXSPLT - The PPC VSX splat instructions.
VECSHL - The PPC vector shift left instruction.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition: Function.h:212
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
Provides information about what library functions are available for the current target.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:643
CHAIN = SC CHAIN, Imm128 - System call.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:920
Represents one node in the SelectionDAG.
VPERM - The PPC VPERM Instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction...
const Function & getFunction() const
Return the LLVM function that this machine code represents.
STXSIX - The STXSI[bh]X instruction.
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
Class for arbitrary precision integers.
Definition: APInt.h:69
QVGPCI = This corresponds to the QPX qvgpci instruction.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2...
Flags
Flags values. These may be or'd together.
GPRC = address of GLOBAL_OFFSET_TABLE.
Representation of each machine instruction.
Definition: MachineInstr.h:63
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SelectSupportKind
Enum that describes what type of support for selects the target has.
Reciprocal estimate instructions (unary FP ops).
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
Establish a view to a call site for examination.
Definition: CallSite.h:897
Direct move from a GPR to a VSX register (zero)
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The CMPB instruction (takes two operands of i32 or i64).
The memory access always returns the same value (or traps).
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
TC_RETURN - A tail call return.
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
XXREVERSE - The PPC VSX reverse instruction.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
Direct move of 2 consecutive GPR to a VSX register.
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
These nodes represent PPC shifts.
VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a v2f32 value into the lower ha...
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
Extract a subvector from a signed integer vector and convert it to FP.
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain...
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
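A small usage sketch (an assumption, not code from this header), with the llvm namespace in scope and Op an SDValue provided by the caller: a constant that fits a signed 16-bit field, such as a D-form displacement, can be tested and extracted in one call.
// Sketch only: check whether Op is a constant representable as a
// sign-extended 16-bit immediate, and read the value back if so.
int16_t Imm;
if (isIntS16Immediate(Op.getNode(), Imm)) {
  // Imm now holds the sign-extended 16-bit constant (e.g. usable as a
  // D-form displacement).
}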
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VMRGH* instruction with the ...
XXPERMDI - The PPC XXPERMDI instruction.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:220
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:950
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1. ...