LLVM 22.0.0git
PPCISelLowering.h
Go to the documentation of this file.
1//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that PPC uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
15#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
16
17#include "PPCInstrInfo.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Function.h"
29#include "llvm/IR/InlineAsm.h"
30#include "llvm/IR/Metadata.h"
31#include "llvm/IR/Type.h"
32#include <optional>
33#include <utility>
34
35namespace llvm {
36
37 /// Define some predicates that are used for node matching.
38 namespace PPC {
39
40 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
41 /// VPKUHUM instruction.
42 bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
43 SelectionDAG &DAG);
44
45 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
46 /// VPKUWUM instruction.
47 bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
48 SelectionDAG &DAG);
49
50 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
51 /// VPKUDUM instruction.
52 bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
53 SelectionDAG &DAG);
54
55 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
56 /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
57 bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
58 unsigned ShuffleKind, SelectionDAG &DAG);
59
60 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
61 /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
62 bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
63 unsigned ShuffleKind, SelectionDAG &DAG);
64
65 /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
66 /// a VMRGEW or VMRGOW instruction
67 bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
68 unsigned ShuffleKind, SelectionDAG &DAG);
69 /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
70 /// for a XXSLDWI instruction.
71 bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
72 bool &Swap, bool IsLE);
73
74 /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
75 /// for a XXBRH instruction.
76 bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);
77
78 /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
79 /// for a XXBRW instruction.
80 bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);
81
82 /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
83 /// for a XXBRD instruction.
84 bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);
85
86 /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
87 /// for a XXBRQ instruction.
88 bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);
89
90 /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
91 /// for a XXPERMDI instruction.
92 bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
93 bool &Swap, bool IsLE);
94
95 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
96 /// shift amount, otherwise return -1.
97 int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
98 SelectionDAG &DAG);
99
100 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
101 /// specifies a splat of a single element that is suitable for input to
102 /// VSPLTB/VSPLTH/VSPLTW.
103 bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
104
105 /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
106 /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
107 /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
108 /// vector into the other. This function will also set a couple of
109 /// output parameters for how much the source vector needs to be shifted and
110 /// what byte number needs to be specified for the instruction to put the
111 /// element in the desired location of the target vector.
112 bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
113 unsigned &InsertAtByte, bool &Swap, bool IsLE);
114
115 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
116 /// appropriate for PPC mnemonics (which have a big endian bias - namely
117 /// elements are counted from the left of the vector register).
118 unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
119 SelectionDAG &DAG);
120
121 /// get_VSPLTI_elt - If this is a build_vector of constants which can be
122 /// formed by using a vspltis[bhw] instruction of the specified element
123 /// size, return the constant being splatted. The ByteSize field indicates
124 /// the number of bytes of each element [124] -> [bhw].
125 SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
126
127 // Flags for computing the optimal addressing mode for loads and stores.
130
131 // Extension mode for integer loads.
133 MOF_ZExt = 1 << 1,
134 MOF_NoExt = 1 << 2,
135
136 // Address computation flags.
137 MOF_NotAddNorCst = 1 << 5, // Not const. or sum of ptr and scalar.
138 MOF_RPlusSImm16 = 1 << 6, // Reg plus signed 16-bit constant.
139 MOF_RPlusLo = 1 << 7, // Reg plus signed 16-bit relocation
140 MOF_RPlusSImm16Mult4 = 1 << 8, // Reg plus 16-bit signed multiple of 4.
141 MOF_RPlusSImm16Mult16 = 1 << 9, // Reg plus 16-bit signed multiple of 16.
142 MOF_RPlusSImm34 = 1 << 10, // Reg plus 34-bit signed constant.
143 MOF_RPlusR = 1 << 11, // Sum of two variables.
144 MOF_PCRel = 1 << 12, // PC-Relative relocation.
145 MOF_AddrIsSImm32 = 1 << 13, // A simple 32-bit constant.
146
147 // The in-memory type.
148 MOF_SubWordInt = 1 << 15,
149 MOF_WordInt = 1 << 16,
151 MOF_ScalarFloat = 1 << 18, // Scalar single or double precision.
152 MOF_Vector = 1 << 19, // Vector types and quad precision scalars.
153 MOF_Vector256 = 1 << 20,
154
155 // Subtarget features.
160 };
161
162 // The addressing modes for loads and stores.
172 } // end namespace PPC
173
175 const PPCSubtarget &Subtarget;
176
177 public:
178 explicit PPCTargetLowering(const PPCTargetMachine &TM,
179 const PPCSubtarget &STI);
180
181 bool isSelectSupported(SelectSupportKind Kind) const override {
182 // PowerPC does not support scalar condition selects on vectors.
184 }
185
186 /// getPreferredVectorAction - The code we generate when vector types are
187 /// legalized by promoting the integer element type is often much worse
188 /// than code we generate if we widen the type for applicable vector types.
189 /// The issue with promoting is that the vector is scalarized, individual
190 /// elements promoted and then the vector is rebuilt. So say we load a pair
191 /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
192 /// loads, moves back into VSR's (or memory ops if we don't have moves) and
193 /// then the VPERM for the shuffle. All in all a very slow sequence.
/// NOTE(review): this listing is missing the declaration line (original
/// line 194) and the fall-through return statements (original lines 198
/// and 210). The visible logic: defer scalable/single-element vectors to
/// the default, split wide i1 vectors, promote narrow i1 vectors, and
/// widen vectors whose element size is a multiple of 8 bits.
195 const override {
196 // Default handling for scalable and single-element vectors.
197 if (VT.isScalableVector() || VT.getVectorNumElements() == 1)
199
200 // Split and promote vNi1 vectors so we don't produce v256i1/v512i1
201 // types as those are only for MMA instructions.
202 if (VT.getScalarSizeInBits() == 1 && VT.getSizeInBits() > 16)
203 return TypeSplitVector;
204 if (VT.getScalarSizeInBits() == 1)
205 return TypePromoteInteger;
206
207 // Widen vectors that have reasonably sized elements.
208 if (VT.getScalarSizeInBits() % 8 == 0)
209 return TypeWidenVector;
211 }
212
213 bool useSoftFloat() const override;
214
215 bool hasSPE() const;
216
/// All scalar shift amounts are represented as i32, independent of the
/// DataLayout and of the type being shifted.
217 MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
218 return MVT::i32;
219 }
220
221 bool isCheapToSpeculateCttz(Type *Ty) const override {
222 return true;
223 }
224
/// Count-leading-zeros is always considered cheap to speculate, for any
/// type.
225 bool isCheapToSpeculateCtlz(Type *Ty) const override {
226 return true;
227 }
228
229 bool
231 unsigned ElemSizeInBits,
232 unsigned &Index) const override;
233
/// Report ctlz as fast unconditionally.
234 bool isCtlzFast() const override {
235 return true;
236 }
237
/// Equality compares are not folded together with signed compares on this
/// target.
238 bool isEqualityCmpFoldedWithSignedCmp() const override {
239 return false;
240 }
241
242 bool hasAndNotCompare(SDValue) const override {
243 return true;
244 }
245
246 bool preferIncOfAddToSubOfNot(EVT VT) const override;
247
/// Combining the logic of setcc results into bitwise operations is only
/// enabled for scalar integer types.
248 bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
249 return VT.isScalarInteger();
250 }
251
253 bool OptForSize, NegatibleCost &Cost,
254 unsigned Depth = 0) const override;
255
256 /// getSetCCResultType - Return the ISD::SETCC ValueType
258 EVT VT) const override;
259
260 /// Return true if target always benefits from combining into FMA for a
261 /// given value type. This must typically return false on targets where FMA
262 /// takes more cycles to execute than FADD.
263 bool enableAggressiveFMAFusion(EVT VT) const override;
264
265 /// getPreIndexedAddressParts - returns true by value, base pointer and
266 /// offset pointer and addressing mode by reference if the node's address
267 /// can be legally represented as pre-indexed load / store address.
271 SelectionDAG &DAG) const override;
272
273 /// SelectAddressEVXRegReg - Given the specified address, check to see if
274 /// it can be more efficiently represented as [r+imm].
276 SelectionDAG &DAG) const;
277
278 /// SelectAddressRegReg - Given the specified address, check to see if it
279 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
280 /// is non-zero, only accept displacement which is not suitable for [r+imm].
281 /// Returns false if it can be represented by [r+imm], which are preferred.
283 SelectionDAG &DAG,
284 MaybeAlign EncodingAlignment = std::nullopt) const;
285
286 /// SelectAddressRegImm - Returns true if the address N can be represented
287 /// by a base register plus a signed 16-bit displacement [r+imm], and if it
288 /// is not better represented as reg+reg. If \p EncodingAlignment is
289 /// non-zero, only accept displacements suitable for instruction encoding
290 /// requirement, i.e. multiples of 4 for DS form.
292 SelectionDAG &DAG,
293 MaybeAlign EncodingAlignment) const;
295 SelectionDAG &DAG) const;
296
297 /// SelectAddressRegRegOnly - Given the specified address, force it to be
298 /// represented as an indexed [r+r] operation.
300 SelectionDAG &DAG) const;
301
302 /// SelectAddressPCRel - Represent the specified address as pc relative to
303 /// be represented as [pc+imm]
305
307
308 /// LowerOperation - Provide custom lowering hooks for some operations.
309 ///
310 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
311
312 /// ReplaceNodeResults - Replace the results of node with an illegal result
313 /// type with new values built out of custom code.
314 ///
316 SelectionDAG &DAG) const override;
317
318 SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
319 SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
320
321 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
322
323 SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
324 SmallVectorImpl<SDNode *> &Created) const override;
325
326 Register getRegisterByName(const char* RegName, LLT VT,
327 const MachineFunction &MF) const override;
328
330 KnownBits &Known,
331 const APInt &DemandedElts,
332 const SelectionDAG &DAG,
333 unsigned Depth = 0) const override;
334
335 Align getPrefLoopAlignment(MachineLoop *ML) const override;
336
/// Request explicit fences around every atomic instruction.
337 bool shouldInsertFencesForAtomic(const Instruction *I) const override {
338 return true;
339 }
340
341 Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
342 AtomicOrdering Ord) const override;
343
344 Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
345 AtomicOrdering Ord) const override;
346
348 AtomicOrdering Ord) const override;
350 AtomicOrdering Ord) const override;
351
352 bool shouldInlineQuadwordAtomics() const;
353
355 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
356
359
361 AtomicRMWInst *AI, Value *AlignedAddr,
362 Value *Incr, Value *Mask,
363 Value *ShiftAmt,
364 AtomicOrdering Ord) const override;
367 Value *AlignedAddr, Value *CmpVal,
368 Value *NewVal, Value *Mask,
369 AtomicOrdering Ord) const override;
370
373 MachineBasicBlock *MBB) const override;
376 unsigned AtomicSize,
377 unsigned BinOpcode,
378 unsigned CmpOpcode = 0,
379 unsigned CmpPred = 0) const;
382 bool is8bit,
383 unsigned Opcode,
384 unsigned CmpOpcode = 0,
385 unsigned CmpPred = 0) const;
386
388 MachineBasicBlock *MBB) const;
389
391 MachineBasicBlock *MBB) const;
392
394 MachineBasicBlock *MBB) const;
395
396 bool hasInlineStackProbe(const MachineFunction &MF) const override;
397
398 unsigned getStackProbeSize(const MachineFunction &MF) const;
399
400 ConstraintType getConstraintType(StringRef Constraint) const override;
401
402 /// Examine constraint string and operand type and determine a weight value.
403 /// The operand object must already have been set up with the operand type.
405 AsmOperandInfo &info, const char *constraint) const override;
406
407 std::pair<unsigned, const TargetRegisterClass *>
409 StringRef Constraint, MVT VT) const override;
410
411 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
412 /// function arguments in the caller parameter area.
413 Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override;
414
415 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
416 /// vector. If it is invalid, don't add anything to Ops.
418 std::vector<SDValue> &Ops,
419 SelectionDAG &DAG) const override;
420
422 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
423 if (ConstraintCode == "es")
425 else if (ConstraintCode == "Q")
427 else if (ConstraintCode == "Z")
429 else if (ConstraintCode == "Zy")
431 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
432 }
433
436 SelectionDAG &DAG) const override;
437
438 /// isLegalAddressingMode - Return true if the addressing mode represented
439 /// by AM is legal for this target, for a load/store of the specified type.
440 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
441 Type *Ty, unsigned AS,
442 Instruction *I = nullptr) const override;
443
444 /// isLegalICmpImmediate - Return true if the specified immediate is legal
445 /// icmp immediate, that is the target has icmp instructions which can
446 /// compare a register against the immediate without having to materialize
447 /// the immediate into a register.
448 bool isLegalICmpImmediate(int64_t Imm) const override;
449
450 /// isLegalAddImmediate - Return true if the specified immediate is legal
451 /// add immediate, that is the target has add instructions which can
452 /// add a register and the immediate without having to materialize
453 /// the immediate into a register.
454 bool isLegalAddImmediate(int64_t Imm) const override;
455
456 /// isTruncateFree - Return true if it's free to truncate a value of
457 /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate a i64 value in
458 /// register X1 to i32 by referencing its sub-register R1.
459 bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
460 bool isTruncateFree(EVT VT1, EVT VT2) const override;
461
462 bool isZExtFree(SDValue Val, EVT VT2) const override;
463
464 bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
465
466 /// Returns true if it is beneficial to convert a load of a constant
467 /// to just the constant itself.
469 Type *Ty) const override;
470
/// Always prefer converting a select between two constants into arithmetic.
471 bool convertSelectOfConstantsToMath(EVT VT) const override {
472 return true;
473 }
474
475 bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
476 SDValue C) const override;
477
479 EVT VT) const override {
480 // Only handle float load/store pair because float(fpr) load/store
481 // instruction has more cycles than integer(gpr) load/store in PPC.
482 if (Opc != ISD::LOAD && Opc != ISD::STORE)
483 return false;
484 if (VT != MVT::f32 && VT != MVT::f64)
485 return false;
486
487 return true;
488 }
489
490 // Returns true if the address of the global is stored in TOC entry.
492
493 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
494
495 bool getTgtMemIntrinsic(IntrinsicInfo &Info,
496 const CallInst &I,
497 MachineFunction &MF,
498 unsigned Intrinsic) const override;
499
500 /// It returns EVT::Other if the type should be determined using generic
501 /// target-independent logic.
502 EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
503 const AttributeList &FuncAttributes) const override;
504
505 /// Is unaligned memory access allowed for the given type, and is it fast
506 /// relative to software emulation.
508 EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
510 unsigned *Fast = nullptr) const override;
511
512 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
513 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
514 /// expanded to FMAs when this method returns true, otherwise fmuladd is
515 /// expanded to fmul + fadd.
517 EVT VT) const override;
518
519 bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;
520
521 /// isProfitableToHoist - Check if it is profitable to hoist instruction
522 /// \p I to its dominator block.
523 /// For example, it is not profitable if \p I and its only user can form a
524 /// FMA instruction, because Powerpc prefers FMADD.
525 bool isProfitableToHoist(Instruction *I) const override;
526
527 const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
528
529 // Should we expand the build vector with shuffles?
530 bool
532 unsigned DefinedValues) const override;
533
534 // Keep the zero-extensions for arguments to libcalls.
// NOTE(review): presumably the FP16-conversion libcalls expect their
// arguments zero-extended — confirm against the runtime's ABI.
535 bool shouldKeepZExtForFP16Conv() const override { return true; }
536
537 /// createFastISel - This method returns a target-specific FastISel object,
538 /// or null if the target does not support "fast" instruction selection.
540 const TargetLibraryInfo *LibInfo) const override;
541
542 /// Returns true if an argument of type Ty needs to be passed in a
543 /// contiguous block of registers in calling convention CallConv.
545 Type *Ty, CallingConv::ID CallConv, bool isVarArg,
546 const DataLayout &DL) const override {
547 // We support any array type as "consecutive" block in the parameter
548 // save area. The element type defines the alignment requirement and
549 // whether the argument should go in GPRs, FPRs, or VRs if available.
550 //
551 // Note that clang uses this capability both to implement the ELFv2
552 // homogeneous float/vector aggregate ABI, and to avoid having to use
553 // "byval" when passing aggregates that might fully fit in registers.
554 return Ty->isArrayTy();
555 }
556
557 /// If a physical register, this returns the register that receives the
558 /// exception address on entry to an EH pad.
560 getExceptionPointerRegister(const Constant *PersonalityFn) const override;
561
562 /// If a physical register, this returns the register that receives the
563 /// exception typeid on entry to a landing pad.
565 getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
566
567 /// Override to support customized stack guard loading.
568 bool useLoadStackGuardNode(const Module &M) const override;
569
570 bool isFPImmLegal(const APFloat &Imm, EVT VT,
571 bool ForCodeSize) const override;
572
573 unsigned getJumpTableEncoding() const override;
574 bool isJumpTableRelative() const override;
576 SelectionDAG &DAG) const override;
578 unsigned JTI,
579 MCContext &Ctx) const override;
580
581 /// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
582 /// compute the address flags of the node, get the optimal address mode
583 /// based on the flags, and set the Base and Disp based on the address mode.
585 SDValue &Disp, SDValue &Base,
586 SelectionDAG &DAG,
587 MaybeAlign Align) const;
588 /// SelectForceXFormMode - Given the specified address, force it to be
589 /// represented as an indexed [r+r] operation (an XForm instruction).
591 SelectionDAG &DAG) const;
592
594 SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
595 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
596 const override;
597 /// Structure that collects some common arguments that get passed around
598 /// between the functions for call lowering.
/// NOTE(review): this listing is missing original lines 600 and 608-612;
/// at least one leading member and the constructor are not visible here —
/// confirm against the upstream header.
599 struct CallFlags {
601 const bool IsTailCall : 1;
602 const bool IsVarArg : 1;
603 const bool IsPatchPoint : 1;
604 const bool IsIndirect : 1;
605 const bool HasNest : 1;
606 const bool NoMerge : 1;
607
613 };
614
616 bool IsVarArg) const;
617 bool supportsTailCallFor(const CallBase *CB) const;
618
619 bool hasMultipleConditionRegisters(EVT VT) const override;
620
621 private:
/// Bundles the address, chain, and memory metadata of an existing load so
/// that a later lowering step can re-create an equivalent load of the same
/// location (see canReuseLoadAddress / LowerFP_TO_INTForReuse below).
622 struct ReuseLoadInfo {
623 SDValue Ptr;
624 SDValue Chain;
625 SDValue ResChain;
// NOTE(review): original line 626 is missing from this listing — one
// member is not visible here; confirm against the upstream header.
627 bool IsDereferenceable = false;
628 bool IsInvariant = false;
629 Align Alignment;
630 AAMDNodes AAInfo;
631 const MDNode *Ranges = nullptr;
632
633 ReuseLoadInfo() = default;
634
// Translate the boolean properties above into MachineMemOperand flags.
// NOTE(review): original lines 636, 638, and 640 (the declaration of F
// and the flag-setting statements) are missing from this listing.
635 MachineMemOperand::Flags MMOFlags() const {
637 if (IsDereferenceable)
639 if (IsInvariant)
641 return F;
642 }
643 };
644
645 // Map that relates a set of common address flags to PPC addressing modes.
646 std::map<PPC::AddrMode, SmallVector<unsigned, 16>> AddrModesMap;
647 void initializeAddrModeMap();
648
649 bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
650 SelectionDAG &DAG,
652
653 void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
654 SelectionDAG &DAG, const SDLoc &dl) const;
655 SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
656 const SDLoc &dl) const;
657
658 bool directMoveIsProfitable(const SDValue &Op) const;
659 SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
660 const SDLoc &dl) const;
661
662 SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
663 const SDLoc &dl) const;
664
665 SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;
666
667 SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
668 SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
669
670 bool IsEligibleForTailCallOptimization(
671 const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
672 CallingConv::ID CallerCC, bool isVarArg,
673 const SmallVectorImpl<ISD::InputArg> &Ins) const;
674
675 bool IsEligibleForTailCallOptimization_64SVR4(
676 const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
677 CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
678 const SmallVectorImpl<ISD::OutputArg> &Outs,
679 const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
680 bool isCalleeExternalSymbol) const;
681
682 bool isEligibleForTCO(const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
683 CallingConv::ID CallerCC, const CallBase *CB,
684 bool isVarArg,
685 const SmallVectorImpl<ISD::OutputArg> &Outs,
686 const SmallVectorImpl<ISD::InputArg> &Ins,
687 const Function *CallerFunc,
688 bool isCalleeExternalSymbol) const;
689
690 SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
691 SDValue Chain, SDValue &LROpOut,
692 SDValue &FPOpOut,
693 const SDLoc &dl) const;
694
695 SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;
696
697 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
698 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
699 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
700 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
701 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
702 SDValue LowerGlobalTLSAddressAIX(SDValue Op, SelectionDAG &DAG) const;
703 SDValue LowerGlobalTLSAddressLinux(SDValue Op, SelectionDAG &DAG) const;
704 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
705 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
706 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
707 SDValue LowerSSUBO(SDValue Op, SelectionDAG &DAG) const;
708 SDValue LowerSADDO(SDValue Op, SelectionDAG &DAG) const;
709 SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
710 SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
711 SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
712 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
713 SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
714 SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
715 SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
716 SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
717 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
718 SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
719 SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
720 SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
721 SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
722 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
723 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
724 const SDLoc &dl) const;
725 SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
726 SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
727 SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
728 SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
729 SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
730 SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
731 SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) const;
732 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
733 SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
734 SDValue LowerVPERM(SDValue Op, SelectionDAG &DAG, ArrayRef<int> PermMask,
735 EVT VT, SDValue V1, SDValue V2) const;
736 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
737 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
738 SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
739 SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
740 SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
741 SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
742 SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
743 SDValue LowerADDSUBO(SDValue Op, SelectionDAG &DAG) const;
744 SDValue LowerUCMP(SDValue Op, SelectionDAG &DAG) const;
745 SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
746 SelectionDAG &DAG) const;
747 SDValue lowerLibCallBasedOnType(const char *LibCallFloatName,
748 const char *LibCallDoubleName, SDValue Op,
749 SelectionDAG &DAG) const;
750 bool isLowringToMASSFiniteSafe(SDValue Op) const;
751 bool isLowringToMASSSafe(SDValue Op) const;
752 bool isScalarMASSConversionEnabled() const;
753 SDValue lowerLibCallBase(const char *LibCallDoubleName,
754 const char *LibCallFloatName,
755 const char *LibCallDoubleNameFinite,
756 const char *LibCallFloatNameFinite, SDValue Op,
757 SelectionDAG &DAG) const;
758 SDValue lowerPow(SDValue Op, SelectionDAG &DAG) const;
759 SDValue lowerSin(SDValue Op, SelectionDAG &DAG) const;
760 SDValue lowerCos(SDValue Op, SelectionDAG &DAG) const;
761 SDValue lowerLog(SDValue Op, SelectionDAG &DAG) const;
762 SDValue lowerLog10(SDValue Op, SelectionDAG &DAG) const;
763 SDValue lowerExp(SDValue Op, SelectionDAG &DAG) const;
764 SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) const;
765 SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
766 SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
767 SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
768 SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
769 SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
770
771 SDValue LowerVP_LOAD(SDValue Op, SelectionDAG &DAG) const;
772 SDValue LowerVP_STORE(SDValue Op, SelectionDAG &DAG) const;
773
774 SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
775 SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
776 SDValue LowerDMFVectorLoad(SDValue Op, SelectionDAG &DAG) const;
777 SDValue LowerDMFVectorStore(SDValue Op, SelectionDAG &DAG) const;
778 SDValue DMFInsert1024(const SmallVectorImpl<SDValue> &Pairs,
779 const SDLoc &dl, SelectionDAG &DAG) const;
780
781 SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
782 CallingConv::ID CallConv, bool isVarArg,
783 const SmallVectorImpl<ISD::InputArg> &Ins,
784 const SDLoc &dl, SelectionDAG &DAG,
785 SmallVectorImpl<SDValue> &InVals) const;
786
787 SDValue FinishCall(CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
788 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
789 SDValue InGlue, SDValue Chain, SDValue CallSeqStart,
790 SDValue &Callee, int SPDiff, unsigned NumBytes,
791 const SmallVectorImpl<ISD::InputArg> &Ins,
792 SmallVectorImpl<SDValue> &InVals,
793 const CallBase *CB) const;
794
795 SDValue
796 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
797 const SmallVectorImpl<ISD::InputArg> &Ins,
798 const SDLoc &dl, SelectionDAG &DAG,
799 SmallVectorImpl<SDValue> &InVals) const override;
800
801 SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
802 SmallVectorImpl<SDValue> &InVals) const override;
803
804 bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
805 bool isVarArg,
806 const SmallVectorImpl<ISD::OutputArg> &Outs,
807 LLVMContext &Context, const Type *RetTy) const override;
808
809 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
810 const SmallVectorImpl<ISD::OutputArg> &Outs,
811 const SmallVectorImpl<SDValue> &OutVals,
812 const SDLoc &dl, SelectionDAG &DAG) const override;
813
814 SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
815 SelectionDAG &DAG, SDValue ArgVal,
816 const SDLoc &dl) const;
817
818 SDValue LowerFormalArguments_AIX(
819 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
820 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
821 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
822 SDValue LowerFormalArguments_64SVR4(
823 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
824 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
825 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
826 SDValue LowerFormalArguments_32SVR4(
827 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
828 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
829 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
830
831 SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
832 SDValue CallSeqStart,
833 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
834 const SDLoc &dl) const;
835
/// Per-ABI call lowering (64-bit SVR4, 32-bit SVR4, AIX). Each variant
/// lowers an outgoing call with arguments Outs/OutVals, fills InVals with
/// the lowered call results described by Ins, and receives the originating
/// IR call site in CB (may be null — TODO confirm against callers).
836 SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
837 const SmallVectorImpl<ISD::OutputArg> &Outs,
838 const SmallVectorImpl<SDValue> &OutVals,
839 const SmallVectorImpl<ISD::InputArg> &Ins,
840 const SDLoc &dl, SelectionDAG &DAG,
841 SmallVectorImpl<SDValue> &InVals,
842 const CallBase *CB) const;
843 SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
844 const SmallVectorImpl<ISD::OutputArg> &Outs,
845 const SmallVectorImpl<SDValue> &OutVals,
846 const SmallVectorImpl<ISD::InputArg> &Ins,
847 const SDLoc &dl, SelectionDAG &DAG,
848 SmallVectorImpl<SDValue> &InVals,
849 const CallBase *CB) const;
850 SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
851 const SmallVectorImpl<ISD::OutputArg> &Outs,
852 const SmallVectorImpl<SDValue> &OutVals,
853 const SmallVectorImpl<ISD::InputArg> &Ins,
854 const SDLoc &dl, SelectionDAG &DAG,
855 SmallVectorImpl<SDValue> &InVals,
856 const CallBase *CB) const;
857
/// Custom lowering for the SjLj exception-handling setjmp/longjmp nodes.
858 SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
859 SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
/// Custom lowering for ISD::BITCAST nodes.
860 SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
861
// Target-specific DAG-combine helpers for the node kinds named in each
// method. Each takes a candidate node N and the combiner context DCI and
// returns the replacement value (conventionally an empty SDValue when no
// combine applies — TODO confirm in PPCISelLowering.cpp).
862 SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
863 SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
864 SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
865 SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
866 SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
867 SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
868 SDValue combineVectorShift(SDNode *N, DAGCombinerInfo &DCI) const;
869 SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
870 SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
871 SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
872 SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
873 SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const;
874 SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
875 SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
/// Combine for VECTOR_SHUFFLE nodes (shuffle-specific; takes the shuffle
/// node directly rather than a generic SDNode).
876 SDValue combineVectorShuffle(ShuffleVectorSDNode *SVN,
877 SelectionDAG &DAG) const;
/// Combine a byte-reversing shuffle with the load/store LSBase it feeds
/// (per the name; see the implementation).
878 SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
879 DAGCombinerInfo &DCI) const;
880
881 /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
882 /// SETCC with integer subtraction when (1) there is a legal way of doing it
883 /// (2) keeping the result of comparison in GPR has performance benefit.
884 SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;
885
/// Overrides of TargetLowering's reciprocal / reciprocal-square-root
/// estimate interface (estimate generation, denormal input test and
/// result, and the repeated-FP-divisors combining threshold).
886 SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
887 int &RefinementSteps, bool &UseOneConstNR,
888 bool Reciprocal) const override;
889 SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
890 int &RefinementSteps) const override;
891 SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
892 const DenormalMode &Mode) const override;
893 SDValue getSqrtResultForDenormInput(SDValue Operand,
894 SelectionDAG &DAG) const override;
895 unsigned combineRepeatedFPDivisors() const override;
896
/// Combine element-wise FP truncations into a single vector truncation —
/// per the name; see the implementation for the exact pattern matched.
897 SDValue
898 combineElementTruncationToVectorTruncation(SDNode *N,
899 DAGCombinerInfo &DCI) const;
900
/// Combine for BUILD_VECTORs of loads with special values (per the name;
/// see the implementation).
901 SDValue combineBVLoadsSpecialValue(SDValue Operand,
902 SelectionDAG &DAG) const;
903
904 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
905 /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
906 /// essentially any shuffle of v8i16 vectors that just inserts one element
907 /// from one vector into the other.
908 SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
909
910 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
911 /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
912 /// essentially v16i8 vector version of VINSERTH.
913 SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
914
915 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
916 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1.
917 SDValue lowerToXXSPLTI32DX(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
918
919 // Return whether the call instruction can potentially be optimized to a
920 // tail call. This will cause the optimizers to attempt to move, or
921 // duplicate return instructions to help enable tail call optimizations.
922 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
/// Override of TargetLowering::isMaskAndCmp0FoldingBeneficial — whether
/// folding a mask with a compare-against-zero is profitable on PPC.
923 bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
924
925 /// getAddrModeForFlags - Based on the set of address flags, select the most
926 /// optimal instruction format to match by.
927 PPC::AddrMode getAddrModeForFlags(unsigned Flags) const;
928
929 /// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
930 /// the address flags of the load/store instruction that is to be matched.
931 /// The address flags are stored in a map, which is then searched
932 /// through to determine the optimal load/store instruction format.
933 unsigned computeMOFlags(const SDNode *Parent, SDValue N,
934 SelectionDAG &DAG) const;
935 }; // end class PPCTargetLowering
936
937 namespace PPC {
938
/// createFastISel - Create the PPC-specific FastISel (fast-path
/// instruction selection) object for the given function-lowering state
/// and target library information.
939 FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
940 const TargetLibraryInfo *LibInfo);
941
942 } // end namespace PPC
943
/// isIntS16Immediate - Test whether the node/operand is a constant that
/// can be accurately represented as a signed 16-bit immediate; on success
/// Imm receives the value.
944 bool isIntS16Immediate(SDNode *N, int16_t &Imm);
945 bool isIntS16Immediate(SDValue Op, int16_t &Imm);
/// isIntS34Immediate - Same, for signed 34-bit immediates (e.g. the
/// 34-bit displacement fields of prefixed instructions).
946 bool isIntS34Immediate(SDNode *N, int64_t &Imm);
947 bool isIntS34Immediate(SDValue Op, int64_t &Imm);
948
/// Convert (or, for the check variant, test whether it is possible to
/// convert) the given value so it is representable as a non-denormal
/// single-precision float; returns true on success — per the names;
/// see the implementations for the exact semantics.
949 bool convertToNonDenormSingle(APInt &ArgAPInt);
950 bool convertToNonDenormSingle(APFloat &ArgAPFloat);
951 bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat);
952
953} // end namespace llvm
954
955#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
return SDValue()
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
Analysis containing CSE Info
Definition CSEInfo.cpp:27
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
Definition APInt.h:78
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Context object for machine code objects.
Definition MCContext.h:83
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
Metadata node.
Definition Metadata.h:1078
Machine Value Type.
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOInvariant
The memory access always returns the same value (or traps).
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
CCAssignFn * ccAssignFnForCall(CallingConv::ID CC, bool Return, bool IsVarArg) const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2.
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
SelectForceXFormMode - Given the specified address, force it to be represented as an indexed [r+r] op...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool hasInlineStackProbe(const MachineFunction &MF) const override
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool supportsTailCallFor(const CallBase *CB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
MachineBasicBlock * emitProbedAlloca(MachineInstr &MI, MachineBasicBlock *MBB) const
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign EncodingAlignment) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
bool isSelectSupported(SelectSupportKind Kind) const override
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
bool hasMultipleConditionRegisters(EVT VT) const override
Does the target have multiple (allocatable) condition registers that can be used to store the results...
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, MaybeAlign EncodingAlignment=std::nullopt) const
SelectAddressRegReg - Given the specified addressed, check to see if it can be more efficiently repre...
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
bool hasAndNotCompare(SDValue) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified addressed, force it to be represented as an indexed [r+...
bool useSoftFloat() const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always benefits from combining into FMA for a given value type.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
bool isProfitableToHoist(Instruction *I) const override
isProfitableToHoist - Check if it is profitable to hoist instruction I to its dominator block.
bool convertSelectOfConstantsToMath(EVT VT) const override
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const override
Return true if the target shall perform extract vector element and store given that the vector is kno...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
bool isEqualityCmpFoldedWithSignedCmp() const override
Return true if instruction generated for equality comparison is folded with instruction generated for...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
getPreferredVectorAction - The code we generate when vector types are legalized by promoting the inte...
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const override
unsigned getStackProbeSize(const MachineFunction &MF) const
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool useLoadStackGuardNode(const Module &M) const override
Override to support customized stack guard loading.
bool shouldKeepZExtForFP16Conv() const override
Does this target require the clearing of high-order bits in a register passed to the fp16 to fp conve...
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation.
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
Similar to the 16-bit case but for instructions that take a 34-bit displacement field (prefixed loads...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isJumpTableRelative() const override
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign Align) const
SelectOptimalAddrMode - Based on a node N and it's Parent (a MemSDNode), compute the address flags of...
bool SelectAddressPCRel(SDValue N, SDValue &Base) const
SelectAddressPCRel - Represent the specified address as pc relative to be represented as [pc+imm].
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressEVXRegReg - Given the specified addressed, check to see if it can be more efficiently re...
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool isAccessedAsGotIndirect(SDValue N) const
Align getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
Common code between 32-bit and 64-bit PowerPC targets.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Provides information about what library functions are available for the current target.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
SelectSupportKind
Enum that describes what type of support for selects the target has.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
NegatibleCost
Enum that specifies when a float negation is beneficial.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
TargetLowering(const TargetLowering &)=delete
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Define some predicates that are used for node matching.
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VRGH* instruction with the ...
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getSplatIdxForPPCMnemonics - Return the splat index as a value that is appropriate for PPC mnemonics ...
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat)
InstructionCost Cost
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate,...
bool convertToNonDenormSingle(APInt &ArgAPInt)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool isIntS34Immediate(SDNode *N, int64_t &Imm)
isIntS34Immediate - This method tests if value of node given can be accurately represented as a sign ...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:761
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:157
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Structure that collects some common arguments that get passed around between the functions for call l...
CallFlags(CallingConv::ID CC, bool IsTailCall, bool IsVarArg, bool IsPatchPoint, bool IsIndirect, bool HasNest, bool NoMerge)