//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,

  STG,
  STZG,
  ST2G,
  STZ2G

};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}
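
// For example, a 32-bit ADD whose result is zero-extended to 64 bits needs no
// explicit extension: the W-register write already cleared bits [63:32].
// TRUNCATE, EXTRACT_SUBREG and CopyFromReg give no such guarantee, hence the
// exclusions above.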

} // end anonymous namespace

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(
      LLT Ty, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
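  // A factor of up to 4 matches the ld2/ld3/ld4 and st2/st3/st4 structured
  // NEON load/store instructions used by the interleaved-access lowering.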

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                         bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
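  // emitLoadLinked and emitStoreConditional map onto the LDXR/LDAXR and
  // STXR/STLXR exclusive-access instructions; emitAtomicCmpXchgNoStoreLLBalance
  // emits the CLREX needed on the no-store path of a cmpxchg expansion.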

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.

    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }
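  // For example, with the noimplicitfloat attribute adjacent i32 stores may be
  // merged into a single i64 store, but never into a 128-bit q-register store.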

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }
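  // Keeping the and-not form in both hooks above lets (x & ~y) select directly
  // to BIC for scalar integers and for 64/128-bit NEON vectors, instead of
  // materialising the inverted operand separately.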

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
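  // The accepted widths are the ones the SXTB/SXTH/SXTW sign-extension
  // instructions handle directly, so the transformed check lowers to a cheap
  // sign-extend followed by a compare on scalars.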

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;
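  // On AAPCS64 (Linux) targets va_list is a 32-byte structure; Darwin and
  // Windows use a plain pointer, so the result is 256 or 64 bits respectively.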

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;
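  // For example, a 256-bit vector is expected to be split into two 128-bit
  // (q-register) accesses, so this would return 2 for such a type.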

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
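  // These helpers materialise symbol addresses for the different code models:
  // getGOT emits a LOADgot, getAddrLarge the four-instruction MOVZ/MOVK
  // (WrapperLarge) sequence, getAddr an ADRP + ADDlow pair, and getAddrTiny a
  // single ADR for the tiny code model.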
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
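  // The estimate paths are built from FRSQRTE/FRECPE followed by the requested
  // number of FRSQRTS/FRECPS refinement steps (see the AArch64ISD reciprocal
  // nodes above).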
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif