//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

// For predicated nodes where the result is a vector, the operation is
// controlled by a governing predicate and the inactive lanes are explicitly
// defined with a value, please stick to the following naming convention:
//
// _MERGE_OP<n>    The result value is a vector with inactive lanes equal
//                 to source operand OP<n>.
//
// _MERGE_ZERO     The result value is a vector with inactive lanes
//                 actively zeroed.
//
// _MERGE_PASSTHRU The result value is a vector with inactive lanes equal
//                 to the last source operand, whose only purpose is to be
//                 a passthru value.
//
// For other cases where no explicit action is needed to set the inactive lanes,
// or when the result is not a vector and it is needed or helpful to
// distinguish a node from similar unpredicated nodes, use:
//
// _PRED
//
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Pseudo for an OBJC call that gets emitted together with a special `mov
  // x29, x29` marker instruction.
  CALL_RVMARKER,

  CALL_BTI, // Function call followed by a BTI instruction.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_GLUE, // Return with a glue operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Predicated instructions where inactive lanes produce undefined results.

  // Unpredicated vector instructions
  BIC,

  // Predicated instructions with the result of inactive lanes provided by the
  // last operand.

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,
  DUPLANE128,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bitwise select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSP,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,
  SPLICE,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector narrowing shift by immediate (bottom)
  RSHRNB_I,

  // Vector shift by constant and insert
  VSLI,
  VSRI,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Unsigned sum Long across Vector
  UADDLV,

  // Add Pairwise of two vectors
  ADDP,
  // Add Long Pairwise
  SADDLP,
  UADDLP,

  // udot/sdot instructions
  UDOT,
  SDOT,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise insertion
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,
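  // For example (illustrative): an NVCAST can re-interpret a v2i64 SIMD
  // register as v4i32 in big-endian mode without the REV instructions that
  // an ISD::BITCAST would otherwise require.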

  MRS, // MRS, also sets the flags via a glue.

  // Reciprocal estimates and steps.
  FRECPE,
  FRECPS,
  FRSQRTE,
  FRSQRTS,

  // Floating-point reductions.
  FADDA_PRED,
  FADDV_PRED,
  FMAXV_PRED,
  FMAXNMV_PRED,
  FMINV_PRED,
  FMINNMV_PRED,

  // Cast between vectors of the same element type that differ in length.
  REINTERPRET_CAST,

  // Nodes to build an LD64B / ST64B 64-bit quantity out of i64, and vice versa
  LS64_BUILD,
  LS64_EXTRACT,

  // Structured loads.
  SVE_LD2_MERGE_ZERO,
  SVE_LD3_MERGE_ZERO,
  SVE_LD4_MERGE_ZERO,

  // Unsigned gather loads.

  // Signed gather loads

  // Unsigned gather loads.

  // Signed gather loads.

  // Non-temporal gather loads

  // Contiguous masked store.
  ST1_PRED,

  // Scatter store

  // Non-temporal scatter store
  SSTNT1_PRED,
  SSTNT1_INDEX_PRED,

  // SME
  RDSVL,
  REVD_MERGE_PASSTHRU,

  // Asserts that a function argument (i32) is zero-extended to i8 by
  // the caller
  ASSERT_ZEXT_BOOL,

  // 128-bit system register accesses
  // lo64, hi64, chain = MRRS(chain, sysregname)
  MRRS,
  // chain = MSRR(chain, sysregname, lo64, hi64)
  MSRR,

  // Strict (exception-raising) floating point comparison
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // SME ZA loads and stores
  SME_ZA_LDR,
  SME_ZA_STR,

  // NEON Load/Store with post-increment base updates

  // Memory Operations
  MOPS_MEMSET,
  MOPS_MEMSET_TAGGED,
  MOPS_MEMCOPY,
  MOPS_MEMMOVE,
};

} // end namespace AArch64ISD

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};
// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;
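// For example (illustrative): given an FPCR value, the current mode can be
// recovered as Rounding((FPCR >> RoundingBitsPos) & rmMask).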

// Registers used to pass function arguments.
ArrayRef<MCPhysReg> getGPRArgRegs();
ArrayRef<MCPhysReg> getFPRArgRegs();

/// Maximum allowed number of unprobed bytes above SP at an ABI
/// boundary.
const unsigned StackProbeMaxUnprobedStack = 1024;

/// Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxLoopUnroll = 4;

} // namespace AArch64

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Similar to isShuffleMaskLegal. Return true if the given 'select with zero'
  /// shuffle mask can be codegen'd directly.
  bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                 MachineInstr &MI, MachineBasicBlock *BB,
                                 bool HasTile) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool shouldRemoveRedundantExtend(SDValue Op) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool optimizeExtendOrTruncateConversion(
      Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
                                        LoadInst *LI) const override;

  bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
                                       StoreInst *SI) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOptLevel OptLevel) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                            EVT VT) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool isOpSuitableForLSE128(const Instruction *I) const;
  bool isOpSuitableForRCPC3(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.

    bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    TypeSize TS = VT.getSizeInBits();
    // TODO: We should be able to use bic/bif too for SVE.
    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
  }

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  bool supportKCFIBundles() const override { return true; }

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags getTargetMMOFlags(
      const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  /// If a change in streaming mode is required on entry to/return from a
  /// function call, this emits and returns the corresponding SMSTART or
  /// SMSTOP node. \p Entry tells whether this is before/after the Call,
  /// which is necessary because PSTATE.SM is only queried once.
  SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable,
                              SDValue Chain, SDValue InGlue,
                              SDValue PStateSM, bool Entry) const;

  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }

  // Normally SVE is only used for byte size vectors that do not fit within a
  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64bit and 128bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
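  // For example (illustrative): a fixed-length v8i32 does not fit in a
  // 128-bit NEON register, so it can only be lowered using SVE, while with
  // OverrideNEON=true even NEON-sized types such as v4i32 may be lowered
  // using SVE.
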
  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT, bool StreamingSVE);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  unsigned allocateLazySaveBuffer(SDValue &Chain, const SDLoc &DL,
                                  SelectionDAG &DAG) const;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;
  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  /// Handle Lowering flag assembly outputs.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally usable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
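  // For example (illustrative): "bitcasting" an unpacked nxv2f32 to nxv4i32
  // may first REINTERPRET_CAST nxv2f32 to the packed nxv4f32, then BITCAST
  // nxv4f32 to nxv4i32.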
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;

  // Returns the runtime value for PSTATE.SM. When the function is
  // streaming-compatible, this generates a call to __arm_sme_state.
  SDValue getPStateSM(SelectionDAG &DAG, SDValue Chain, SMEAttrs Attrs,
                      SDLoc DL, EVT VT) const;

  bool preferScalarizeSplat(SDNode *N) const override;

  unsigned getMinimumJumpTableEntries() const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
uint64_t Addr
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
const char LLVMTargetMachineRef TM
static cl::opt< RegAllocEvictionAdvisorAnalysis::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysis::AdvisorMode::Development, "development", "for training")))
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
static constexpr uint32_t Opcode
Definition: aarch32.h:200
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, SDValue PStateSM, bool Entry) const
If a change in streaming mode is required on entry to/return from a function call it emits and return...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB, bool HasTile) const
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
bool fallBackToDAGISel(const Instruction &Inst) const override
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI, LoadInst *LI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool isOpSuitableForLDPSTP(const Instruction *I) const
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, StoreInst *SI) const override
Lower an interleave intrinsic to a target specific store intrinsic.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
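The algebra behind the hook, with a worked instance for c1 = 3, c2 = 5 (self-contained check, not LLVM code): distributing the constant turns the add-then-mul into a mul-then-add, which can feed AArch64's madd when c1*c2 is still cheap to materialise.
  #include <cassert>
  #include <cstdint>

  static int64_t addThenMul(int64_t x) { return (x + 3) * 5; }
  static int64_t mulThenAdd(int64_t x) { return x * 5 + 15; } // 15 == 3 * 5

  int main() {
    for (int64_t x = -100; x <= 100; ++x)
      assert(addThenMul(x) == mulThenAdd(x));
  }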
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition: APInt.h:76
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:521
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:726
CCState - This class holds information needed while lowering arguments and return values.
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A parsed version of the target data layout string and methods for querying it.
Definition: DataLayout.h:110
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition: Function.h:262
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:666
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:94
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:177
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Definition: MachineInstr.h:68
Flags
Flags values. These may be or'd together.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:189
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:173
@ NVCAST
Natural vector cast.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
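RoundingBitsPos is the bit position (22) of the FPCR RMode field: bits 23:22 encode 0 = to nearest, 1 = toward +inf, 2 = toward -inf, 3 = toward zero. A sketch of the extraction the constant supports (illustrative, not the in-tree code):
  #include <cstdint>

  static unsigned fpcrRoundingMode(uint64_t fpcr) {
    const unsigned RoundingBitsPos = 22; // FPCR.RMode lives in bits 23:22
    return (fpcr >> RoundingBitsPos) & 0x3;
  }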
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1383
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:1395
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1455
static const int FIRST_TARGET_STRICTFP_OPCODE
FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations which cannot raise FP exceptions s...
Definition: ISDOpcodes.h:1389
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1506
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1486
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
AddressSpace
Definition: NVPTXBaseInfo.h:21
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
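A toy model of the contract (hypothetical types, not LLVM's CCState machinery): an assign function consumes one location per value, registers first and then 8-byte stack slots, and returns true only when it fails to place the value.
  #include <cstdint>
  #include <vector>

  struct ToyCCState {
    std::vector<unsigned> freeRegs; // e.g. X0..X7 for AAPCS integer args
    uint64_t nextStackOffset = 0;
  };

  // Mirrors the CCAssignFn convention: returns true on failure.
  static bool toyAssignFn(ToyCCState &st) {
    if (!st.freeRegs.empty()) {
      st.freeRegs.erase(st.freeRegs.begin()); // take the next free register
      return false;
    }
    st.nextStackOffset += 8; // fall back to the next stack slot
    return false;
  }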
CombineLevel
Definition: DAGCombine.h:15
DWARFExpression::Operation Op
@ Enable
Enable colors.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition: ValueTypes.h:34
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:351
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:160