LLVM  13.0.0git
AArch64ISelLowering.cpp File Reference
#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "AArch64GenAsmMatcher.inc"


Classes

struct  GenericSetCCInfo
 Helper structure to keep track of ISD::SET_CC operands. More...
 
struct  AArch64SetCCInfo
 Helper structure to keep track of a SET_CC lowered into AArch64 code. More...
 
union  SetCCInfo
 Helper structure to keep track of SetCC information. More...
 
struct  SetCCInfoAndKind
 Helper structure to be able to read SetCC information. More...
 

Macros

#define DEBUG_TYPE   "aarch64-lower"
 
#define LCALLNAMES(A, B, N)
 
#define LCALLNAME4(A, B)
 
#define LCALLNAME5(A, B)
 
#define MAKE_CASE(V)
 
#define GET_REGISTER_MATCHER
 

Enumerations

enum  PredicateConstraint { Upl, Upa, Invalid }
 

Functions

 STATISTIC (NumTailCalls, "Number of tail calls")
 
 STATISTIC (NumShiftInserts, "Number of vector shift inserts")
 
 STATISTIC (NumOptimizedImms, "Number of times immediates were optimized")
 
static EVT getPackedSVEVectorVT (EVT VT)
 
static EVT getPackedSVEVectorVT (ElementCount EC)
 
static EVT getPromotedVTForPredicate (EVT VT)
 
static bool isPackedVectorType (EVT VT, SelectionDAG &DAG)
 Returns true if VT's elements occupy the lowest bit positions of its associated register class without any intervening space. More...
 
static bool isMergePassthruOpcode (unsigned Opc)
 
static bool optimizeLogicalImm (SDValue Op, unsigned Size, uint64_t Imm, const APInt &Demanded, TargetLowering::TargetLoweringOpt &TLO, unsigned NewOpc)
 
static bool isZerosVector (const SDNode *N)
 isZerosVector - Check whether SDNode N is a zero-filled vector. More...
 
static AArch64CC::CondCode changeIntCCToAArch64CC (ISD::CondCode CC)
 changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 CC More...
 
static void changeFPCCToAArch64CC (ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
 changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC. More...
 
static void changeFPCCToANDAArch64CC (ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
 Convert a DAG fp condition code to an AArch64 CC. More...
 
static void changeVectorFPCCToAArch64CC (ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
 changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector instructions. More...
 
static bool isLegalArithImmed (uint64_t C)
 
static bool isCMN (SDValue Op, ISD::CondCode CC)
 
static SDValue emitStrictFPComparison (SDValue LHS, SDValue RHS, const SDLoc &dl, SelectionDAG &DAG, SDValue Chain, bool IsSignaling)
 
static SDValue emitComparison (SDValue LHS, SDValue RHS, ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG)
 
static unsigned getCmpOperandFoldingProfit (SDValue Op)
 Returns how profitable it is to fold a comparison's operand's shift and/or extension operations. More...
 
static SDValue getAArch64Cmp (SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, const SDLoc &dl)
 
static std::pair< SDValue, SDValuegetAArch64XALUOOp (AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerADDC_ADDE_SUBC_SUBE (SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerXALUO (SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerPREFETCH (SDValue Op, SelectionDAG &DAG)
 
static SDValue LowerBITCAST (SDValue Op, SelectionDAG &DAG)
 
static EVT getExtensionTo64Bits (const EVT &OrigVT)
 
static SDValue addRequiredExtensionForVectorMULL (SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
 
static bool isExtendedBUILD_VECTOR (SDNode *N, SelectionDAG &DAG, bool isSigned)
 
static SDValue skipExtensionForVectorMULL (SDNode *N, SelectionDAG &DAG)
 
static bool isSignExtended (SDNode *N, SelectionDAG &DAG)
 
static bool isZeroExtended (SDNode *N, SelectionDAG &DAG)
 
static bool isAddSubSExt (SDNode *N, SelectionDAG &DAG)
 
static bool isAddSubZExt (SDNode *N, SelectionDAG &DAG)
 
static SDValue getPTrue (SelectionDAG &DAG, SDLoc DL, EVT VT, int Pattern)
 
unsigned getGatherVecOpcode (bool IsScaled, bool IsSigned, bool NeedsExtend)
 
unsigned getScatterVecOpcode (bool IsScaled, bool IsSigned, bool NeedsExtend)
 
unsigned getSignExtendedGatherOpcode (unsigned Opcode)
 
bool getGatherScatterIndexIsExtended (SDValue Index)
 
void selectGatherScatterAddrMode (SDValue &BasePtr, SDValue &Index, EVT MemVT, unsigned &Opcode, bool IsGather, SelectionDAG &DAG)
 
static SDValue LowerTruncateVectorStore (SDLoc DL, StoreSDNode *ST, EVT VT, EVT MemVT, SelectionDAG &DAG)
 
static bool canGuaranteeTCO (CallingConv::ID CC)
 Return true if the calling convention is one that we can guarantee TCO for. More...
 
static bool mayTailCallThisCC (CallingConv::ID CC)
 Return true if we might ever do TCO for calls with this calling convention. More...
 
std::pair< SDValue, uint64_t > lookThroughSignExtension (SDValue Val)
 
static SDValue getEstimate (const AArch64Subtarget *ST, unsigned Opcode, SDValue Operand, SelectionDAG &DAG, int &ExtraSteps)
 
static PredicateConstraint parsePredicateConstraint (StringRef Constraint)
 
static SDValue WidenVector (SDValue V64Reg, SelectionDAG &DAG)
 WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 register class. More...
 
static unsigned getExtFactor (SDValue &V)
 getExtFactor - Determine the adjustment factor for the position when generating an "extract from vector registers" instruction. More...
 
static SDValue NarrowVector (SDValue V128Reg, SelectionDAG &DAG)
 NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 register class. More...
 
static bool isSingletonEXTMask (ArrayRef< int > M, EVT VT, unsigned &Imm)
 
static bool isWideDUPMask (ArrayRef< int > M, EVT VT, unsigned BlockSize, unsigned &DupLaneOp)
 Check if a vector shuffle corresponds to a DUP instructions with a larger element width than the vector lane type. More...
 
static bool isEXTMask (ArrayRef< int > M, EVT VT, bool &ReverseEXT, unsigned &Imm)
 
static bool isREVMask (ArrayRef< int > M, EVT VT, unsigned BlockSize)
 isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize. More...
 
static bool isZIPMask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 
static bool isUZPMask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 
static bool isTRNMask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 
static bool isZIP_v_undef_Mask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef". More...
 
static bool isUZP_v_undef_Mask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef". More...
 
static bool isTRN_v_undef_Mask (ArrayRef< int > M, EVT VT, unsigned &WhichResult)
 isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef". More...
 
static bool isINSMask (ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
 
static bool isConcatMask (ArrayRef< int > Mask, EVT VT, bool SplitLHS)
 
static SDValue tryFormConcatFromShuffle (SDValue Op, SelectionDAG &DAG)
 
static SDValue GeneratePerfectShuffle (unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
 GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations to build the shuffle. More...
 
static SDValue GenerateTBL (SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
 
static unsigned getDUPLANEOp (EVT EltType)
 
static SDValue constructDup (SDValue V, int Lane, SDLoc dl, EVT VT, unsigned Opcode, SelectionDAG &DAG)
 
static bool resolveBuildVector (BuildVectorSDNode *BVN, APInt &CnstBits, APInt &UndefBits)
 
static SDValue tryAdvSIMDModImm64 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static SDValue tryAdvSIMDModImm32 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
 
static SDValue tryAdvSIMDModImm16 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
 
static SDValue tryAdvSIMDModImm321s (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static SDValue tryAdvSIMDModImm8 (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static SDValue tryAdvSIMDModImmFP (unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
 
static bool isAllConstantBuildVector (const SDValue &PotentialBVec, uint64_t &ConstVal)
 
static unsigned getIntrinsicID (const SDNode *N)
 
static SDValue tryLowerToSLI (SDNode *N, SelectionDAG &DAG)
 
static SDValue NormalizeBuildVector (SDValue Op, SelectionDAG &DAG)
 
static SDValue ConstantBuildVector (SDValue Op, SelectionDAG &DAG)
 
static bool getVShiftImm (SDValue Op, unsigned ElementBits, int64_t &Cnt)
 getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift operation, where all the elements of the build_vector must have the same constant integer value. More...
 
static bool isVShiftLImm (SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
 isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left operation. More...
 
static bool isVShiftRImm (SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
 isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation. More...
 
static SDValue EmitVectorComparison (SDValue LHS, SDValue RHS, AArch64CC::CondCode CC, bool NoNans, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
 
static SDValue getReductionSDNode (unsigned Op, SDLoc DL, SDValue ScalarOp, SelectionDAG &DAG)
 
template<unsigned NumVecs>
static bool setInfoSVEStN (const AArch64TargetLowering &TLI, const DataLayout &DL, AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI)
 Set the IntrinsicInfo for the aarch64_sve_st<N> intrinsics. More...
 
static bool areExtractShuffleVectors (Value *Op1, Value *Op2)
 Check if both Op1 and Op2 are shufflevector extracts of either the lower or upper half of the vector elements. More...
 
static bool areExtractExts (Value *Ext1, Value *Ext2)
 Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements. More...
 
static bool isOperandOfVmullHighP64 (Value *Op)
 Check if Op could be used with vmull_high_p64 intrinsic. More...
 
static bool areOperandsOfVmullHighP64 (Value *Op1, Value *Op2)
 Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic. More...
 
static SDValue foldVectorXorShiftIntoCmp (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 Turn vector tests of the signbit in the form of: xor (sra X, elt_size(X)-1), -1 into: cmge X, X, #0. More...
 
static SDValue performVecReduceAddCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *ST)
 
static SDValue performABSCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performXorCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static bool IsSVECntIntrinsic (SDValue S)
 
static EVT calculatePreExtendType (SDValue Extend, SelectionDAG &DAG)
 Calculates what the pre-extend type is, based on the extension operation node provided by Extend. More...
 
static SDValue performCommonVectorExtendCombine (SDValue VectorShuffle, SelectionDAG &DAG)
 Combines a dup(sext/zext) node pattern into sext/zext(dup) making use of the vector SExt/ZExt rather than the scalar SExt/ZExt. More...
 
static SDValue performMulVectorExtendCombine (SDNode *Mul, SelectionDAG &DAG)
 Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup)) making use of the vector SExt/ZExt rather than the scalar SExt/ZExt. More...
 
static SDValue performMulCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performVectorCompareAndMaskUnaryOpCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performIntToFpCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performFpToIntCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 Fold a floating-point multiply by power of two into floating-point to fixed-point conversion. More...
 
static SDValue performFDivCombine (SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 Fold a floating-point divide by power of two into fixed-point to floating-point conversion. More...
 
static bool findEXTRHalf (SDValue N, SDValue &Src, uint32_t &ShiftAmount, bool &FromHi)
 An EXTR instruction is made up of two shifts, ORed together. More...
 
static SDValue tryCombineToEXTR (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 EXTR instruction extracts a contiguous chunk of bits from two existing registers viewed as a high/low pair. More...
 
static SDValue tryCombineToBSL (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performORCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static bool isConstantSplatVectorMaskForType (SDNode *N, EVT MemVT)
 
static SDValue performSVEAndCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performANDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performSRLCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static SDValue performVectorTruncateCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static bool hasPairwiseAdd (unsigned Opcode, EVT VT, bool FullFP16)
 
static SDValue performExtractVectorEltCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performConcatVectorsCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryCombineFixedPointConvert (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryExtendDUPToExtractHigh (SDValue N, SelectionDAG &DAG)
 
static bool isEssentiallyExtractHighSubvector (SDValue N)
 
static bool isSetCC (SDValue Op, SetCCInfoAndKind &SetCCInfo)
 Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one. More...
 
static bool isSetCCOrZExtSetCC (const SDValue &Op, SetCCInfoAndKind &Info)
 
static SDValue performSetccAddFolding (SDNode *Op, SelectionDAG &DAG)
 
static SDValue performUADDVCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performAddDotCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performAddSubLongCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performAddSubCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryCombineLongOpWithDup (unsigned IID, SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue tryCombineShiftImm (unsigned IID, SDNode *N, SelectionDAG &DAG)
 
static SDValue tryCombineCRC32 (unsigned Mask, SDNode *N, SelectionDAG &DAG)
 
static SDValue combineAcrossLanesIntrinsic (unsigned Opc, SDNode *N, SelectionDAG &DAG)
 
static SDValue LowerSVEIntrinsicIndex (SDNode *N, SelectionDAG &DAG)
 
static SDValue LowerSVEIntrinsicDUP (SDNode *N, SelectionDAG &DAG)
 
static SDValue LowerSVEIntrinsicEXT (SDNode *N, SelectionDAG &DAG)
 
static SDValue tryConvertSVEWideCompare (SDNode *N, ISD::CondCode CC, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue getPTest (SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op, AArch64CC::CondCode Cond)
 
static SDValue combineSVEReductionInt (SDNode *N, unsigned Opc, SelectionDAG &DAG)
 
static SDValue combineSVEReductionFP (SDNode *N, unsigned Opc, SelectionDAG &DAG)
 
static SDValue combineSVEReductionOrderedFP (SDNode *N, unsigned Opc, SelectionDAG &DAG)
 
static SDValue convertMergedOpToPredOp (SDNode *N, unsigned PredOpc, SelectionDAG &DAG)
 
static SDValue performIntrinsicCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
 
static SDValue performExtendCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue splitStoreSplat (SelectionDAG &DAG, StoreSDNode &St, SDValue SplatVal, unsigned NumVecElts)
 
static MVT getSVEContainerType (EVT ContentTy)
 
static SDValue performLD1Combine (SDNode *N, SelectionDAG &DAG, unsigned Opc)
 
static SDValue performLDNT1Combine (SDNode *N, SelectionDAG &DAG)
 
template<unsigned Opcode>
static SDValue performLD1ReplicateCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performST1Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSTNT1Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue replaceZeroVectorStore (SelectionDAG &DAG, StoreSDNode &St)
 Replace a splat of zeros to a vector store by scalar stores of WZR/XZR. More...
 
static SDValue replaceSplatVectorStore (SelectionDAG &DAG, StoreSDNode &St)
 Replace a splat of a scalar to a vector store by scalar stores of the scalar value. More...
 
static SDValue splitStores (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performUzpCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performGLD1Combine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performPostLD1Combine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, bool IsLaneOp)
 Target-specific DAG combine function for post-increment LD1 (lane) and post-increment LD1R. More...
 
static bool performTBISimplification (SDValue Addr, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 Simplify Addr given that the top byte of it is ignored by HW during address translation. More...
 
static SDValue performSTORECombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static SDValue performNEONPostLDSTCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates. More...
 
static bool checkValueWidth (SDValue V, unsigned width, ISD::LoadExtType &ExtType)
 
static bool isEquivalentMaskless (unsigned CC, unsigned width, ISD::LoadExtType ExtType, int AddConstant, int CompConstant)
 
static SDValue performCONDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex)
 
static SDValue performBRCONDCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performCSELCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue getTestBitOperand (SDValue Op, unsigned &Bit, bool &Invert, SelectionDAG &DAG)
 
static SDValue performTBZCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue performVSelectCombine (SDNode *N, SelectionDAG &DAG)
 
static SDValue performSelectCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with the compare-mask instructions rather than going via NZCV, even if LHS and RHS are really scalar. More...
 
static SDValue performNVCASTCombine (SDNode *N)
 Get rid of unnecessary NVCASTs (that don't change the type). More...
 
static SDValue performGlobalAddressCombine (SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget, const TargetMachine &TM)
 
static SDValue performStepVectorCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue getScaledOffsetForBitWidth (SelectionDAG &DAG, SDValue Offset, SDLoc DL, unsigned BitWidth)
 
static bool isValidImmForSVEVecImmAddrMode (unsigned OffsetInBytes, unsigned ScalarSizeInBytes)
 Check if the value of OffsetInBytes can be used as an immediate for the gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode: More...
 
static bool isValidImmForSVEVecImmAddrMode (SDValue Offset, unsigned ScalarSizeInBytes)
Check if the value of Offset represents a valid immediate for the SVE gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode: More...
 
static SDValue performScatterStoreCombine (SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets=true)
 
static SDValue performGatherLoadCombine (SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets=true)
 
static SDValue performSignExtendInRegCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
 
static SDValue legalizeSVEGatherPrefetchOffsVec (SDNode *N, SelectionDAG &DAG)
 Legalize the gather prefetch (scalar + vector addressing mode) when the offset vector is an unpacked 32-bit scalable vector. More...
 
static SDValue combineSVEPrefetchVecBaseImmOff (SDNode *N, SelectionDAG &DAG, unsigned ScalarSizeInBytes)
Combines a node carrying the intrinsic aarch64_sve_prf<T>_gather_scalar_offset into a node that uses aarch64_sve_prfb_gather_uxtw_index when the scalar offset passed to aarch64_sve_prf<T>_gather_scalar_offset is not a valid immediate for the SVE gather prefetch instruction with vector plus immediate addressing mode. More...
 
static bool isLanes1toNKnownZero (SDValue Op)
 
static SDValue removeRedundantInsertVectorElt (SDNode *N)
 
static SDValue performInsertVectorEltCombine (SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
 
static void ReplaceBITCASTResults (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
 
static void ReplaceReductionResults (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, unsigned InterOp, unsigned AcrossOp)
 
static std::pair< SDValue, SDValuesplitInt128 (SDValue N, SelectionDAG &DAG)
 
static SDValue createGPRPairNode (SelectionDAG &DAG, SDValue V)
 
static void ReplaceCMP_SWAP_128Results (SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
 
static ValueUseTlsOffset (IRBuilder<> &IRB, unsigned Offset)
 
static EVT getContainerForFixedLengthVector (SelectionDAG &DAG, EVT VT)
 
static SDValue getPredicateForFixedLengthVector (SelectionDAG &DAG, SDLoc &DL, EVT VT)
 
static SDValue getPredicateForScalableVector (SelectionDAG &DAG, SDLoc &DL, EVT VT)
 
static SDValue getPredicateForVector (SelectionDAG &DAG, SDLoc &DL, EVT VT)
 
static SDValue convertToScalableVector (SelectionDAG &DAG, EVT VT, SDValue V)
 
static SDValue convertFromScalableVector (SelectionDAG &DAG, EVT VT, SDValue V)
 
static SDValue emitConditionalComparison (SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue CCOp, AArch64CC::CondCode Predicate, AArch64CC::CondCode OutCC, const SDLoc &DL, SelectionDAG &DAG)
 can be transformed to: not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" which can be implemented as: cmp C ccmp D, inv(CD), CC ccmp A, CA, inv(CD) ccmp B, CB, inv(CA) check for CB flags More...
 
static bool canEmitConjunction (const SDValue Val, bool &CanNegate, bool &MustBeFirst, bool WillNegate, unsigned Depth=0)
 Returns true if Val is a tree of AND/OR/SETCC operations that can be expressed as a conjunction. More...
 
static SDValue emitConjunctionRec (SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate)
 Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops. More...
 
static SDValue emitConjunction (SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC)
 Emit expression as a conjunction (a series of CCMP/CFCMP ops). More...
 

Variables

cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration ("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))
 
static cl::opt< bool > EnableOptimizeLogicalImm ("aarch64-enable-logical-imm", cl::Hidden, cl::desc("Enable AArch64 logical imm instruction " "optimization"), cl::init(true))
 
static cl::opt< bool > EnableCombineMGatherIntrinsics ("aarch64-enable-mgather-combine", cl::Hidden, cl::desc("Combine extends of AArch64 masked " "gather intrinsics"), cl::init(true))
 
static const MVT MVT_CC = MVT::i32
 Value type used for condition codes. More...
 

Macro Definition Documentation

◆ DEBUG_TYPE

#define DEBUG_TYPE   "aarch64-lower"

Definition at line 96 of file AArch64ISelLowering.cpp.

◆ GET_REGISTER_MATCHER

#define GET_REGISTER_MATCHER

Definition at line 7430 of file AArch64ISelLowering.cpp.

◆ LCALLNAME4

#define LCALLNAME4 (   A,
  B 
)
Value:
LCALLNAMES(A, B, 1) \
LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)

◆ LCALLNAME5

#define LCALLNAME5 (   A,
  B 
)
Value:
LCALLNAMES(A, B, 1) \
LCALLNAMES(A, B, 2) \
LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)

◆ LCALLNAMES

#define LCALLNAMES (   A,
  B,
  N 
)
Value:
setLibcallName(A##N##_RELAX, #B #N "_relax"); \
setLibcallName(A##N##_ACQ, #B #N "_acq"); \
setLibcallName(A##N##_REL, #B #N "_rel"); \
setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
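For illustration, expanding the macro by hand shows the naming scheme it builds. The argument values below are only an example (the backend uses invocations of this shape for its outline-atomics helpers; the exact A/B pairs are not listed on this page):

    // Hypothetical invocation: LCALLNAMES(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas, 1)
    // expands to:
    setLibcallName(RTLIB::OUTLINE_ATOMIC_CAS1_RELAX, "__aarch64_cas1_relax");
    setLibcallName(RTLIB::OUTLINE_ATOMIC_CAS1_ACQ, "__aarch64_cas1_acq");
    setLibcallName(RTLIB::OUTLINE_ATOMIC_CAS1_REL, "__aarch64_cas1_rel");
    setLibcallName(RTLIB::OUTLINE_ATOMIC_CAS1_ACQ_REL, "__aarch64_cas1_acq_rel");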

◆ MAKE_CASE

#define MAKE_CASE (   V)
Value:
case V: \
return #V;
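As a sketch of how this is used (the argument chosen here is illustrative), each expansion turns an enumerator into its own name string, the usual pattern for a node-name lookup switch:

    // MAKE_CASE(AArch64ISD::CALL) expands to:
    case AArch64ISD::CALL:
      return "AArch64ISD::CALL";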

Enumeration Type Documentation

◆ PredicateConstraint

Enumerator
Upl 
Upa 
Invalid 

Definition at line 7806 of file AArch64ISelLowering.cpp.

Function Documentation

◆ addRequiredExtensionForVectorMULL()

static SDValue addRequiredExtensionForVectorMULL ( SDValue  N,
SelectionDAG DAG,
const EVT OrigTy,
const EVT ExtTy,
unsigned  ExtOpcode 
)
static

◆ areExtractExts()

static bool areExtractExts ( Value Ext1,
Value Ext2 
)
static

Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.

Definition at line 11126 of file AArch64ISelLowering.cpp.

References llvm::MipsISD::Ext, llvm::PatternMatch::m_Value(), llvm::PatternMatch::m_ZExtOrSExt(), and llvm::PatternMatch::match().

Referenced by llvm::AArch64TargetLowering::shouldSinkOperands().

◆ areExtractShuffleVectors()

static bool areExtractShuffleVectors ( Value Op1,
Value Op2 
)
static

◆ areOperandsOfVmullHighP64()

static bool areOperandsOfVmullHighP64 ( Value Op1,
Value Op2 
)
static

Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.

Definition at line 11153 of file AArch64ISelLowering.cpp.

References isOperandOfVmullHighP64().

Referenced by llvm::AArch64TargetLowering::shouldSinkOperands().

◆ calculatePreExtendType()

static EVT calculatePreExtendType ( SDValue  Extend,
SelectionDAG DAG 
)
static

Calculates what the pre-extend type is, based on the extension operation node provided by Extend.

In the case that Extend is a SIGN_EXTEND or a ZERO_EXTEND, the pre-extend type is pulled directly from the operand, while other extend operations need a bit more inspection to get this information.

Parameters
Extend  The SDNode from the DAG that represents the extend operation
DAG  The SelectionDAG hosting the Extend node
Returns
The type representing the Extend source type, or MVT::Other if no valid type can be determined

Definition at line 12045 of file AArch64ISelLowering.cpp.

References llvm::ISD::AND, llvm::ISD::AssertSext, llvm::ISD::AssertZext, llvm::SDValue::getNode(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::SDValue::getValueType(), llvm::VTSDNode::getVT(), llvm::MVT::i16, llvm::MVT::i32, llvm::MVT::i8, llvm_unreachable, llvm::BitmaskEnumDetail::Mask(), llvm::MVT::Other, llvm::ISD::SIGN_EXTEND, llvm::ISD::SIGN_EXTEND_INREG, and llvm::ISD::ZERO_EXTEND.

Referenced by performCommonVectorExtendCombine().

◆ canEmitConjunction()

static bool canEmitConjunction ( const SDValue  Val,
bool &  CanNegate,
bool &  MustBeFirst,
bool  WillNegate,
unsigned  Depth = 0 
)
static

Returns true if Val is a tree of AND/OR/SETCC operations that can be expressed as a conjunction.

See CMP;CCMP matching.

Parameters
CanNegate  Set to true if we can negate the whole sub-tree just by changing the conditions on the SETCC tests. (This means we can call emitConjunctionRec() with Negate==true on this sub-tree.)
MustBeFirst  Set to true if this subtree needs to be negated and we cannot do the negation naturally. We are required to emit the subtree first in this case.
WillNegate  True if we are called when the result of this subexpression must be negated. This happens when the outer expression is an OR. We can use this fact to know that we have a double negation (or (or ...) ...) that can be implemented for free.

Definition at line 2539 of file AArch64ISelLowering.cpp.

References llvm::ISD::AND, assert(), llvm::Depth, llvm::MVT::f128, llvm::SDNode::getOpcode(), llvm::SDNode::getOperand(), llvm::SDValue::getValueType(), llvm::SDValue::hasOneUse(), llvm::ISD::OR, and llvm::ISD::SETCC.

Referenced by emitConjunction(), and emitConjunctionRec().

◆ canGuaranteeTCO()

static bool canGuaranteeTCO ( CallingConv::ID  CC)
static

Return true if the calling convention is one that we can guarantee TCO for.

Definition at line 5165 of file AArch64ISelLowering.cpp.

References llvm::CallingConv::Fast.

Referenced by mayTailCallThisCC().

◆ changeFPCCToAArch64CC()

static void changeFPCCToAArch64CC ( ISD::CondCode  CC,
AArch64CC::CondCode CondCode,
AArch64CC::CondCode CondCode2 
)
static

◆ changeFPCCToANDAArch64CC()

static void changeFPCCToANDAArch64CC ( ISD::CondCode  CC,
AArch64CC::CondCode CondCode,
AArch64CC::CondCode CondCode2 
)
static

Convert a DAG fp condition code to an AArch64 CC.

This differs from changeFPCCToAArch64CC in that it returns cond codes that should be AND'ed instead of OR'ed.

Definition at line 2290 of file AArch64ISelLowering.cpp.

References llvm::AArch64CC::AL, assert(), changeFPCCToAArch64CC(), llvm::AArch64CC::LE, llvm::AArch64CC::NE, llvm::AArch64CC::PL, llvm::ISD::SETONE, llvm::ISD::SETUEQ, and llvm::AArch64CC::VC.

Referenced by emitConjunctionRec().

◆ changeIntCCToAArch64CC()

static AArch64CC::CondCode changeIntCCToAArch64CC ( ISD::CondCode  CC)
static

◆ changeVectorFPCCToAArch64CC()

static void changeVectorFPCCToAArch64CC ( ISD::CondCode  CC,
AArch64CC::CondCode CondCode,
AArch64CC::CondCode CondCode2,
bool &  Invert 
)
static

changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector instructions.

Fewer operations are available without a real NZCV register, so we have to use less efficient combinations to get the same effect.

Definition at line 2320 of file AArch64ISelLowering.cpp.

References changeFPCCToAArch64CC(), llvm::MVT::f32, llvm::AArch64CC::GE, llvm::ISD::getSetCCInverse(), LLVM_FALLTHROUGH, llvm::AArch64CC::MI, llvm::ISD::SETO, llvm::ISD::SETUEQ, llvm::ISD::SETUGE, llvm::ISD::SETUGT, llvm::ISD::SETULE, llvm::ISD::SETULT, and llvm::ISD::SETUO.

◆ checkValueWidth()

static bool checkValueWidth ( SDValue  V,
unsigned  width,
ISD::LoadExtType ExtType 
)
static

◆ combineAcrossLanesIntrinsic()

static SDValue combineAcrossLanesIntrinsic ( unsigned  Opc,
SDNode N,
SelectionDAG DAG 
)
static

◆ combineSVEPrefetchVecBaseImmOff()

static SDValue combineSVEPrefetchVecBaseImmOff ( SDNode N,
SelectionDAG DAG,
unsigned  ScalarSizeInBytes 
)
static

Combines a node carrying the intrinsic aarch64_sve_prf<T>_gather_scalar_offset into a node that uses aarch64_sve_prfb_gather_uxtw_index when the scalar offset passed to aarch64_sve_prf<T>_gather_scalar_offset is not a valid immediate for the SVE gather prefetch instruction with vector plus immediate addressing mode.

Definition at line 15885 of file AArch64ISelLowering.cpp.

References DL, llvm::SelectionDAG::getConstant(), llvm::SelectionDAG::getNode(), llvm::SelectionDAG::getVTList(), llvm::MVT::i64, isValidImmForSVEVecImmAddrMode(), N, llvm::MVT::Other, and std::swap().

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ combineSVEReductionFP()

static SDValue combineSVEReductionFP ( SDNode N,
unsigned  Opc,
SelectionDAG DAG 
)
static

◆ combineSVEReductionInt()

static SDValue combineSVEReductionInt ( SDNode N,
unsigned  Opc,
SelectionDAG DAG 
)
static

◆ combineSVEReductionOrderedFP()

static SDValue combineSVEReductionOrderedFP ( SDNode N,
unsigned  Opc,
SelectionDAG DAG 
)
static

◆ ConstantBuildVector()

static SDValue ConstantBuildVector ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ constructDup()

static SDValue constructDup ( SDValue  V,
int  Lane,
SDLoc  dl,
EVT  VT,
unsigned  Opcode,
SelectionDAG DAG 
)
static

◆ convertFromScalableVector()

static SDValue convertFromScalableVector ( SelectionDAG DAG,
EVT  VT,
SDValue  V 
)
static

◆ convertMergedOpToPredOp()

static SDValue convertMergedOpToPredOp ( SDNode N,
unsigned  PredOpc,
SelectionDAG DAG 
)
static

◆ convertToScalableVector()

static SDValue convertToScalableVector ( SelectionDAG DAG,
EVT  VT,
SDValue  V 
)
static

◆ createGPRPairNode()

static SDValue createGPRPairNode ( SelectionDAG DAG,
SDValue  V 
)
static

◆ emitComparison()

static SDValue emitComparison ( SDValue  LHS,
SDValue  RHS,
ISD::CondCode  CC,
const SDLoc dl,
SelectionDAG DAG 
)
static

◆ emitConditionalComparison()

static SDValue emitConditionalComparison ( SDValue  LHS,
SDValue  RHS,
ISD::CondCode  CC,
SDValue  CCOp,
AArch64CC::CondCode  Predicate,
AArch64CC::CondCode  OutCC,
const SDLoc DL,
SelectionDAG DAG 
)
static

can be transformed to:

    not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
             (and (not (setCA (cmp A)) (not (setCB (cmp B))))))

which can be implemented as:

    cmp C
    ccmp D, inv(CD), CC
    ccmp A, CA, inv(CD)
    ccmp B, CB, inv(CA)
    check for CB flags

A counterexample is "or (and A B) (and C D)", which translates to not (and (not (and (not A) (not B))) (not (and (not C) (not D)))); we can only implement one of the inner (not) operations, but not both! Create a conditional comparison; use CCMP, CCMN or FCCMP as appropriate.
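As a smaller worked illustration (hand-written here, not taken from the source), a plain two-term conjunction "and (setCA (cmp A)) (setCB (cmp B))" follows the same scheme with a single conditional compare:

    cmp  A                 ; flags for the first comparison
    ccmp B, inv(CB), CA    ; if CA held, compare B; otherwise force flags so CB fails
    check for CB flags

For example, the C expression (a == 0 && b > 5) becomes a cmp on a, a ccmp on b predicated on eq, and a final gt test.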

Definition at line 2491 of file AArch64ISelLowering.cpp.

References assert(), llvm::AArch64ISD::CCMN, llvm::AArch64ISD::CCMP, DL, llvm::MVT::f128, llvm::MVT::f16, llvm::MVT::f32, llvm::AArch64ISD::FCCMP, llvm::ISD::FP_EXTEND, llvm::SelectionDAG::getConstant(), llvm::AArch64CC::getInvertedCondCode(), llvm::SelectionDAG::getNode(), llvm::AArch64CC::getNZCVToSatisfyCondCode(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::SelectionDAG::getSubtarget(), llvm::SDValue::getValueType(), llvm::MVT::i32, llvm::EVT::isFloatingPoint(), llvm::isNullConstant(), MVT_CC, llvm::ISD::SETEQ, llvm::ISD::SETNE, and llvm::ISD::SUB.

Referenced by emitConjunctionRec().

◆ emitConjunction()

static SDValue emitConjunction ( SelectionDAG DAG,
SDValue  Val,
AArch64CC::CondCode OutCC 
)
static

Emit expression as a conjunction (a series of CCMP/CFCMP ops).

In some cases this is even possible with OR operations in the expression. See CMP;CCMP matching.

See also
emitConjunctionRec().

Definition at line 2713 of file AArch64ISelLowering.cpp.

References llvm::AArch64CC::AL, canEmitConjunction(), and emitConjunctionRec().

Referenced by getAArch64Cmp().

◆ emitConjunctionRec()

static SDValue emitConjunctionRec ( SelectionDAG DAG,
SDValue  Val,
AArch64CC::CondCode OutCC,
bool  Negate,
SDValue  CCOp,
AArch64CC::CondCode  Predicate 
)
static

Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops.

See CMP;CCMP matching. Tries to transform the given i1-producing node Val into a series of compare and conditional compare operations.

Returns
an NZCV-flags-producing node, and sets OutCC to the flags that should be tested; or returns SDValue() if the transformation was not possible. Negate is true if we want this sub-tree to be negated just by changing the SETCC conditions.

Definition at line 2601 of file AArch64ISelLowering.cpp.

References llvm::AArch64CC::AL, llvm::ISD::AND, assert(), canEmitConjunction(), changeFPCCToANDAArch64CC(), changeIntCCToAArch64CC(), DL, emitComparison(), emitConditionalComparison(), llvm::AArch64CC::getInvertedCondCode(), llvm::SDValue::getNode(), llvm::SDNode::getOpcode(), llvm::SDNode::getOperand(), llvm::ISD::getSetCCInverse(), llvm::SDValue::getValueType(), llvm::SDNode::hasOneUse(), llvm::EVT::isFloatingPoint(), llvm::EVT::isInteger(), llvm::yaml::isInteger(), llvm::ISD::OR, llvm::ISD::SETCC, and std::swap().

Referenced by emitConjunction().

◆ emitStrictFPComparison()

static SDValue emitStrictFPComparison ( SDValue  LHS,
SDValue  RHS,
const SDLoc dl,
SelectionDAG DAG,
SDValue  Chain,
bool  IsSignaling 
)
static

◆ EmitVectorComparison()

static SDValue EmitVectorComparison ( SDValue  LHS,
SDValue  RHS,
AArch64CC::CondCode  CC,
bool  NoNans,
EVT  VT,
const SDLoc dl,
SelectionDAG DAG 
)
static

◆ findEXTRHalf()

static bool findEXTRHalf ( SDValue  N,
SDValue Src,
uint32_t ShiftAmount,
bool &  FromHi 
)
static

An EXTR instruction is made up of two shifts, ORed together.

This helper searches for and classifies those shifts.

Definition at line 12520 of file AArch64ISelLowering.cpp.

References N, llvm::ISD::SHL, and llvm::ISD::SRL.

Referenced by tryCombineToEXTR().

◆ foldVectorXorShiftIntoCmp()

static SDValue foldVectorXorShiftIntoCmp ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

Turn vector tests of the signbit in the form of: xor (sra X, elt_size(X)-1), -1 into: cmge X, X, #0.
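The rewrite holds because "sra X, elt_size(X)-1" yields all-ones lanes exactly for negative elements, and XOR'ing that with -1 flips it to all-ones exactly for non-negative elements, which is precisely the >= 0 mask that the CMGEz form computes.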

Definition at line 11858 of file AArch64ISelLowering.cpp.

References llvm::AArch64ISD::CMGEz, llvm::SDValue::getNode(), llvm::SelectionDAG::getNode(), llvm::EVT::getSizeInBits(), llvm::AArch64Subtarget::hasNEON(), llvm::ISD::isBuildVectorAllOnes(), llvm::EVT::isVector(), N, Shift, and llvm::AArch64ISD::VASHR.

Referenced by performXorCombine().

◆ GeneratePerfectShuffle()

static SDValue GeneratePerfectShuffle ( unsigned  PFEntry,
SDValue  LHS,
SDValue  RHS,
SelectionDAG DAG,
const SDLoc dl 
)
static

◆ GenerateTBL()

static SDValue GenerateTBL ( SDValue  Op,
ArrayRef< int ShuffleMask,
SelectionDAG DAG 
)
static

◆ getAArch64Cmp()

static SDValue getAArch64Cmp ( SDValue  LHS,
SDValue  RHS,
ISD::CondCode  CC,
SDValue AArch64cc,
SelectionDAG DAG,
const SDLoc dl 
)
static

◆ getAArch64XALUOOp()

static std::pair<SDValue, SDValue> getAArch64XALUOOp ( AArch64CC::CondCode CC,
SDValue  Op,
SelectionDAG DAG 
)
static

◆ getCmpOperandFoldingProfit()

static unsigned getCmpOperandFoldingProfit ( SDValue  Op)
static

Returns how profitable it is to fold a comparison's operand's shift and/or extension operations.

Definition at line 2727 of file AArch64ISelLowering.cpp.

References llvm::ISD::AND, llvm::MVT::i32, llvm::MVT::i64, llvm::BitmaskEnumDetail::Mask(), Shift, llvm::ISD::SHL, llvm::ISD::SIGN_EXTEND_INREG, llvm::ISD::SRA, and llvm::ISD::SRL.

Referenced by getAArch64Cmp().

◆ getContainerForFixedLengthVector()

static EVT getContainerForFixedLengthVector ( SelectionDAG DAG,
EVT  VT 
)
static

◆ getDUPLANEOp()

static unsigned getDUPLANEOp ( EVT  EltType)
static

◆ getEstimate()

static SDValue getEstimate ( const AArch64Subtarget ST,
unsigned  Opcode,
SDValue  Operand,
SelectionDAG DAG,
int ExtraSteps 
)
static

◆ getExtensionTo64Bits()

static EVT getExtensionTo64Bits ( const EVT OrigVT)
static

◆ getExtFactor()

static unsigned getExtFactor ( SDValue V)
static

getExtFactor - Determine the adjustment factor for the position when generating an "extract from vector registers" instruction.

Definition at line 8159 of file AArch64ISelLowering.cpp.

References llvm::EVT::getSizeInBits(), llvm::SDValue::getValueType(), and llvm::EVT::getVectorElementType().

Referenced by GeneratePerfectShuffle(), and llvm::AArch64TargetLowering::ReconstructShuffle().
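A minimal sketch of what the reference list above suggests (the EXT immediate counts bytes, so the factor is the element size in bytes); treat it as an assumed shape, not a verbatim copy:

    static unsigned getExtFactor(SDValue &V) {
      EVT EltType = V.getValueType().getVectorElementType();
      return EltType.getSizeInBits() / 8;  // byte granularity of the EXT immediate
    }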

◆ getGatherScatterIndexIsExtended()

bool getGatherScatterIndexIsExtended ( SDValue  Index)

◆ getGatherVecOpcode()

unsigned getGatherVecOpcode ( bool  IsScaled,
bool  IsSigned,
bool  NeedsExtend 
)

◆ getIntrinsicID()

static unsigned getIntrinsicID ( const SDNode N)
static

◆ getPackedSVEVectorVT() [1/2]

static EVT getPackedSVEVectorVT ( ElementCount  EC)
inlinestatic

◆ getPackedSVEVectorVT() [2/2]

static EVT getPackedSVEVectorVT ( EVT  VT)
inlinestatic

◆ getPredicateForFixedLengthVector()

static SDValue getPredicateForFixedLengthVector ( SelectionDAG DAG,
SDLoc DL,
EVT  VT 
)
static

◆ getPredicateForScalableVector()

static SDValue getPredicateForScalableVector ( SelectionDAG DAG,
SDLoc DL,
EVT  VT 
)
static

◆ getPredicateForVector()

static SDValue getPredicateForVector ( SelectionDAG DAG,
SDLoc DL,
EVT  VT 
)
static

◆ getPromotedVTForPredicate()

static EVT getPromotedVTForPredicate ( EVT  VT)
inlinestatic

◆ getPTest()

static SDValue getPTest ( SelectionDAG DAG,
EVT  VT,
SDValue  Pg,
SDValue  Op,
AArch64CC::CondCode  Cond 
)
static

◆ getPTrue()

static SDValue getPTrue ( SelectionDAG DAG,
SDLoc  DL,
EVT  VT,
int  Pattern 
)
inlinestatic

◆ getReductionSDNode()

static SDValue getReductionSDNode ( unsigned  Op,
SDLoc  DL,
SDValue  ScalarOp,
SelectionDAG DAG 
)
static

◆ getScaledOffsetForBitWidth()

static SDValue getScaledOffsetForBitWidth ( SelectionDAG DAG,
SDValue  Offset,
SDLoc  DL,
unsigned  BitWidth 
)
static

◆ getScatterVecOpcode()

unsigned getScatterVecOpcode ( bool  IsScaled,
bool  IsSigned,
bool  NeedsExtend 
)

◆ getSignExtendedGatherOpcode()

unsigned getSignExtendedGatherOpcode ( unsigned  Opcode)

◆ getSVEContainerType()

static MVT getSVEContainerType ( EVT  ContentTy)
static

◆ getTestBitOperand()

static SDValue getTestBitOperand ( SDValue  Op,
unsigned &  Bit,
bool &  Invert,
SelectionDAG DAG 
)
static

◆ getVShiftImm()

static bool getVShiftImm ( SDValue  Op,
unsigned  ElementBits,
int64_t &  Cnt 
)
static

getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift operation, where all the elements of the build_vector must have the same constant integer value.

Definition at line 10274 of file AArch64ISelLowering.cpp.

References llvm::ISD::BITCAST, llvm::APInt::getSExtValue(), and llvm::BuildVectorSDNode::isConstantSplat().

Referenced by isVShiftLImm(), and isVShiftRImm().
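A sketch of the check implied by the brief and the reference list (look through bitcasts, then require a constant splat no wider than the element size); an approximation rather than the exact source:

    static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
      while (Op.getOpcode() == ISD::BITCAST)   // ignore bit_converts
        Op = Op.getOperand(0);
      auto *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
      APInt SplatBits, SplatUndef;
      unsigned SplatBitSize;
      bool HasAnyUndefs;
      if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                        HasAnyUndefs, ElementBits) ||
          SplatBitSize > ElementBits)
        return false;
      Cnt = SplatBits.getSExtValue();          // the common shift amount
      return true;
    }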

◆ hasPairwiseAdd()

static bool hasPairwiseAdd ( unsigned  Opcode,
EVT  VT,
bool  FullFP16 
)
static

◆ isAddSubSExt()

static bool isAddSubSExt ( SDNode N,
SelectionDAG DAG 
)
static

◆ isAddSubZExt()

static bool isAddSubZExt ( SDNode N,
SelectionDAG DAG 
)
static

◆ isAllConstantBuildVector()

static bool isAllConstantBuildVector ( const SDValue PotentialBVec,
uint64_t &  ConstVal 
)
static

◆ isCMN()

static bool isCMN ( SDValue  Op,
ISD::CondCode  CC 
)
static

◆ isConcatMask()

static bool isConcatMask ( ArrayRef< int Mask,
EVT  VT,
bool  SplitLHS 
)
static

◆ isConstantSplatVectorMaskForType()

static bool isConstantSplatVectorMaskForType ( SDNode N,
EVT  MemVT 
)
static

◆ isEquivalentMaskless()

static bool isEquivalentMaskless ( unsigned  CC,
unsigned  width,
ISD::LoadExtType  ExtType,
int  AddConstant,
int  CompConstant 
)
static

◆ isEssentiallyExtractHighSubvector()

static bool isEssentiallyExtractHighSubvector ( SDValue  N)
static

◆ isExtendedBUILD_VECTOR()

static bool isExtendedBUILD_VECTOR ( SDNode N,
SelectionDAG DAG,
bool  isSigned 
)
static

◆ isEXTMask()

static bool isEXTMask ( ArrayRef< int M,
EVT  VT,
bool &  ReverseEXT,
unsigned &  Imm 
)
static

◆ isINSMask()

static bool isINSMask ( ArrayRef< int M,
int  NumInputElements,
bool &  DstIsLeft,
int Anomaly 
)
static

Definition at line 8674 of file AArch64ISelLowering.cpp.

References i, and M.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isLanes1toNKnownZero()

static bool isLanes1toNKnownZero ( SDValue  Op)
static

◆ isLegalArithImmed()

static bool isLegalArithImmed ( uint64_t  C)
static

Definition at line 2351 of file AArch64ISelLowering.cpp.

References llvm::dbgs(), and LLVM_DEBUG.

Referenced by getAArch64Cmp(), tryAdjustICmpImmAndPred(), and trySwapICmpOperands().
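For context, AArch64 ADD/SUB arithmetic immediates are an unsigned 12-bit value optionally shifted left by 12, so a check equivalent to the one documented here (a sketch based on the ISA encoding, not a quote of the source) is:

    bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && (C >> 24 == 0));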

◆ isMergePassthruOpcode()

static bool isMergePassthruOpcode ( unsigned  Opc)
static

◆ isOperandOfVmullHighP64()

static bool isOperandOfVmullHighP64 ( Value Op)
static

◆ isPackedVectorType()

static bool isPackedVectorType ( EVT  VT,
SelectionDAG DAG 
)
inlinestatic

Returns true if VT's elements occupy the lowest bit positions of its associated register class without any intervening space.

For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the same register class, but only nxv8f16 can be treated as a packed vector.
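A sketch consistent with the reference list below (assumed shape, not a verbatim copy): fixed-length vectors are always packed, and a scalable vector is packed when its minimum size fills a full SVE register block:

    return VT.isFixedLengthVector() ||
           VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;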

Definition at line 191 of file AArch64ISelLowering.cpp.

References assert(), llvm::TypeSize::getKnownMinSize(), llvm::EVT::getSizeInBits(), llvm::SelectionDAG::getTargetLoweringInfo(), llvm::EVT::isFixedLengthVector(), llvm::TargetLoweringBase::isTypeLegal(), llvm::EVT::isVector(), and llvm::AArch64::SVEBitsPerBlock.

◆ isREVMask()

static bool isREVMask ( ArrayRef< int M,
EVT  VT,
unsigned  BlockSize 
)
static

isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize.

(The order of the elements within each block of the vector is reversed.)
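For instance (an illustrative case), with VT = v8i8 and BlockSize = 32 each block holds four byte lanes, so the matching mask is <3, 2, 1, 0, 7, 6, 5, 4>.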

Definition at line 8551 of file AArch64ISelLowering.cpp.

References assert(), BlockSize, llvm::EVT::getScalarSizeInBits(), llvm::EVT::getVectorNumElements(), i, and M.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isSetCC()

static bool isSetCC ( SDValue  Op,
SetCCInfoAndKind SetCCInfo 
)
static

Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one.

SetCCInfo is filled accordingly.

Postcondition
SetCCInfo is meaningful only when this function returns true.
Returns
True when Op is a kind of SET_CC operation.

Definition at line 13287 of file AArch64ISelLowering.cpp.

References SetCCInfo::AArch64, GenericSetCCInfo::CC, AArch64SetCCInfo::CC, AArch64SetCCInfo::Cmp, llvm::AArch64ISD::CSEL, SetCCInfo::Generic, llvm::AArch64CC::getInvertedCondCode(), llvm::ConstantSDNode::isNullValue(), llvm::ConstantSDNode::isOne(), GenericSetCCInfo::Opnd0, GenericSetCCInfo::Opnd1, llvm::ISD::SETCC, and std::swap().

Referenced by isSetCCOrZExtSetCC().

◆ isSetCCOrZExtSetCC()

static bool isSetCCOrZExtSetCC ( const SDValue Op,
SetCCInfoAndKind Info 
)
static

Definition at line 13330 of file AArch64ISelLowering.cpp.

References Info, isSetCC(), and llvm::ISD::ZERO_EXTEND.

Referenced by performSetccAddFolding().

◆ isSignExtended()

static bool isSignExtended ( SDNode N,
SelectionDAG DAG 
)
static

◆ isSingletonEXTMask()

static bool isSingletonEXTMask ( ArrayRef< int M,
EVT  VT,
unsigned &  Imm 
)
static

Definition at line 8404 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements(), i, and M.

◆ IsSVECntIntrinsic()

static bool IsSVECntIntrinsic ( SDValue  S)
static

Definition at line 12020 of file AArch64ISelLowering.cpp.

References llvm::getIntrinsicID(), and S.

Referenced by performMulCombine().

◆ isTRN_v_undef_Mask()

static bool isTRN_v_undef_Mask ( ArrayRef< int M,
EVT  VT,
unsigned &  WhichResult 
)
static

isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".

Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.

Definition at line 8661 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements(), i, and M.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isTRNMask()

static bool isTRNMask ( ArrayRef< int M,
EVT  VT,
unsigned &  WhichResult 
)
static

◆ isUZP_v_undef_Mask()

static bool isUZP_v_undef_Mask ( ArrayRef< int M,
EVT  VT,
unsigned &  WhichResult 
)
static

isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".

Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.

Definition at line 8642 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements(), i, j(), and M.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isUZPMask()

static bool isUZPMask ( ArrayRef< int M,
EVT  VT,
unsigned &  WhichResult 
)
static

◆ isValidImmForSVEVecImmAddrMode() [1/2]

static bool isValidImmForSVEVecImmAddrMode ( SDValue  Offset,
unsigned  ScalarSizeInBytes 
)
static

Check if the value of Offset represents a valid immediate for the SVE gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode:

 [<Zn>.[S|D]{, #<imm>}]

where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.

Definition at line 15517 of file AArch64ISelLowering.cpp.

References llvm::ConstantSDNode::getZExtValue(), isValidImmForSVEVecImmAddrMode(), and Offset.

◆ isValidImmForSVEVecImmAddrMode() [2/2]

static bool isValidImmForSVEVecImmAddrMode ( unsigned  OffsetInBytes,
unsigned  ScalarSizeInBytes 
)
inlinestatic

Check if the value of OffsetInBytes can be used as an immediate for the gather load/prefetch and scatter store instructions with vector base and immediate offset addressing mode:

 [<Zn>.[S|D]{, #<imm>}]

where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
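Concretely, the offset must be a multiple of the element size and at most 31 elements away; e.g. for 32-bit elements (ScalarSizeInBytes = 4) the valid byte offsets are 0, 4, 8, ..., 124. A sketch of that check (assumed shape):

    return (OffsetInBytes % ScalarSizeInBytes == 0) &&
           (OffsetInBytes / ScalarSizeInBytes <= 31);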

Definition at line 15497 of file AArch64ISelLowering.cpp.

Referenced by combineSVEPrefetchVecBaseImmOff(), isValidImmForSVEVecImmAddrMode(), performGatherLoadCombine(), and performScatterStoreCombine().

◆ isVShiftLImm()

static bool isVShiftLImm ( SDValue  Op,
EVT  VT,
bool  isLong,
int64_t &  Cnt 
)
static

isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left operation.

That value must be in the range: 0 <= Value < ElementBits for a left shift; or 0 <= Value <= ElementBits for a long left shift.
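In code form, once the splatted shift amount Cnt has been extracted via getVShiftImm(), the documented range reduces to a single test (a sketch, assuming that helper):

    return Cnt >= 0 && (isLong ? Cnt <= ElementBits : Cnt < ElementBits);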

Definition at line 10294 of file AArch64ISelLowering.cpp.

References assert(), llvm::EVT::getScalarSizeInBits(), getVShiftImm(), and llvm::EVT::isVector().

◆ isVShiftRImm()

static bool isVShiftRImm ( SDValue  Op,
EVT  VT,
bool  isNarrow,
int64_t &  Cnt 
)
static

isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation.

The value must be in the range: 1 <= Value <= ElementBits for a right shift.

Definition at line 10305 of file AArch64ISelLowering.cpp.

References assert(), llvm::EVT::getScalarSizeInBits(), getVShiftImm(), and llvm::EVT::isVector().

◆ isWideDUPMask()

static bool isWideDUPMask ( ArrayRef< int M,
EVT  VT,
unsigned  BlockSize,
unsigned &  DupLaneOp 
)
static

Check if a vector shuffle corresponds to a DUP instructions with a larger element width than the vector lane type.

If that is the case, the function returns true and writes the value of the DUP instruction lane operand into DupLaneOp.
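For example (illustrative), on a v8i8 shuffle the mask <2, 3, 2, 3, 2, 3, 2, 3> repeats byte lanes 2 and 3, i.e. it duplicates the second 16-bit block, so with BlockSize = 16 the function would report DupLaneOp = 1.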

Definition at line 8437 of file AArch64ISelLowering.cpp.

References assert(), BlockSize, llvm::find_if(), llvm::EVT::getScalarSizeInBits(), llvm::EVT::getSizeInBits(), llvm::EVT::getVectorNumElements(), I, and M.

◆ isZeroExtended()

static bool isZeroExtended ( SDNode N,
SelectionDAG DAG 
)
static

◆ isZerosVector()

static bool isZerosVector ( const SDNode N)
static

isZerosVector - Check whether SDNode N is a zero-filled vector.

Definition at line 2180 of file AArch64ISelLowering.cpp.

References llvm::ISD::BITCAST, llvm::AArch64ISD::DUP, llvm::ISD::isConstantSplatVectorAllZeros(), and N.

Referenced by performAddDotCombine().

◆ isZIP_v_undef_Mask()

static bool isZIP_v_undef_Mask ( ArrayRef< int M,
EVT  VT,
unsigned &  WhichResult 
)
static

isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".

Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.

Definition at line 8623 of file AArch64ISelLowering.cpp.

References llvm::EVT::getVectorNumElements(), i, and M.

Referenced by llvm::AArch64TargetLowering::isShuffleMaskLegal().

◆ isZIPMask()

static bool isZIPMask ( ArrayRef< int M,
EVT  VT,
unsigned &  WhichResult 
)
static

◆ legalizeSVEGatherPrefetchOffsVec()

static SDValue legalizeSVEGatherPrefetchOffsVec ( SDNode N,
SelectionDAG DAG 
)
static

Legalize the gather prefetch (scalar + vector addressing mode) when the offset vector is an unpacked 32-bit scalable vector.

The other cases (Offset != nxv2i32) do not need legalization.

Definition at line 15862 of file AArch64ISelLowering.cpp.

References llvm::ISD::ANY_EXTEND, DL, llvm::SelectionDAG::getNode(), llvm::SelectionDAG::getVTList(), N, llvm::MVT::nxv2i32, llvm::MVT::nxv2i64, Offset, and llvm::MVT::Other.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ lookThroughSignExtension()

std::pair<SDValue, uint64_t> lookThroughSignExtension ( SDValue  Val)

◆ LowerADDC_ADDE_SUBC_SUBE()

static SDValue LowerADDC_ADDE_SUBC_SUBE ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerBITCAST()

static SDValue LowerBITCAST ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerPREFETCH()

static SDValue LowerPREFETCH ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ LowerSVEIntrinsicDUP()

static SDValue LowerSVEIntrinsicDUP ( SDNode N,
SelectionDAG DAG 
)
static

◆ LowerSVEIntrinsicEXT()

static SDValue LowerSVEIntrinsicEXT ( SDNode N,
SelectionDAG DAG 
)
static

◆ LowerSVEIntrinsicIndex()

static SDValue LowerSVEIntrinsicIndex ( SDNode N,
SelectionDAG DAG 
)
static

◆ LowerTruncateVectorStore()

static SDValue LowerTruncateVectorStore ( SDLoc  DL,
StoreSDNode ST,
EVT  VT,
EVT  MemVT,
SelectionDAG DAG 
)
static

◆ LowerXALUO()

static SDValue LowerXALUO ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ mayTailCallThisCC()

static bool mayTailCallThisCC ( CallingConv::ID  CC)
static

Return true if we might ever do TCO for calls with this calling convention.

Definition at line 5170 of file AArch64ISelLowering.cpp.

References llvm::CallingConv::AArch64_SVE_VectorCall, llvm::CallingConv::C, canGuaranteeTCO(), llvm::CallingConv::PreserveMost, and llvm::CallingConv::Swift.
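A plausible shape consistent with the reference list above (a sketch, not necessarily the exact source): a small allow-list of conventions, with everything else deferred to canGuaranteeTCO():

    static bool mayTailCallThisCC(CallingConv::ID CC) {
      switch (CC) {
      case CallingConv::C:
      case CallingConv::PreserveMost:
      case CallingConv::Swift:
      case CallingConv::AArch64_SVE_VectorCall:
        return true;                 // may tail call, subject to other checks
      default:
        return canGuaranteeTCO(CC);  // e.g. fastcc when TCO is guaranteed
      }
    }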

◆ NarrowVector()

static SDValue NarrowVector ( SDValue  V128Reg,
SelectionDAG DAG 
)
static

NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 register class.

Definition at line 8166 of file AArch64ISelLowering.cpp.

References DL, llvm::EVT::getSimpleVT(), llvm::SelectionDAG::getTargetExtractSubreg(), llvm::SDValue::getValueType(), llvm::EVT::getVectorElementType(), llvm::EVT::getVectorNumElements(), and llvm::MVT::getVectorVT().
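A sketch assembled from the reference list above (assumed, not verbatim): halve the element count and extract the low D sub-register of the Q register:

    static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
      EVT VT = V128Reg.getValueType();
      unsigned WideSize = VT.getVectorNumElements();
      MVT EltTy = VT.getVectorElementType().getSimpleVT();
      MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
      SDLoc DL(V128Reg);
      // dsub selects the low 64 bits of the 128-bit register.
      return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
    }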

◆ NormalizeBuildVector()

static SDValue NormalizeBuildVector ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ optimizeLogicalImm()

static bool optimizeLogicalImm ( SDValue  Op,
unsigned  Size,
uint64_t  Imm,
const APInt Demanded,
TargetLowering::TargetLoweringOpt TLO,
unsigned  NewOpc 
)
static

◆ parsePredicateConstraint()

static PredicateConstraint parsePredicateConstraint ( StringRef  Constraint)
static

Definition at line 7812 of file AArch64ISelLowering.cpp.

References Invalid, P, Upa, and Upl.

◆ performABSCombine()

static SDValue performABSCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performAddDotCombine()

static SDValue performAddDotCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performAddSubCombine()

static SDValue performAddSubCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performAddSubLongCombine()

static SDValue performAddSubLongCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performANDCombine()

static SDValue performANDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performBRCONDCombine()

static SDValue performBRCONDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performCommonVectorExtendCombine()

static SDValue performCommonVectorExtendCombine ( SDValue  VectorShuffle,
SelectionDAG DAG 
)
static

◆ performConcatVectorsCombine()

static SDValue performConcatVectorsCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performCONDCombine()

static SDValue performCONDCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
unsigned  CCIndex,
unsigned  CmpIndex 
)
static

◆ performCSELCombine()

static SDValue performCSELCombine ( SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG 
)
static

Definition at line 15159 of file AArch64ISelLowering.cpp.

References N, and performCONDCombine().

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performExtendCombine()

static SDValue performExtendCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performExtractVectorEltCombine()

static SDValue performExtractVectorEltCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performFDivCombine()

static SDValue performFDivCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performFpToIntCombine()

static SDValue performFpToIntCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performGatherLoadCombine()

static SDValue performGatherLoadCombine ( SDNode N,
SelectionDAG DAG,
unsigned  Opcode,
bool  OnlyPackedOffsets = true 
)
static

◆ performGLD1Combine()

static SDValue performGLD1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performGlobalAddressCombine()

static SDValue performGlobalAddressCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget,
const TargetMachine TM 
)
static

◆ performInsertVectorEltCombine()

static SDValue performInsertVectorEltCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performIntrinsicCombine()

static SDValue performIntrinsicCombine ( SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget 
)
static

Definition at line 13871 of file AArch64ISelLowering.cpp.

References llvm::AArch64ISD::ANDV_PRED, llvm::AArch64CC::ANY_ACTIVE, combineAcrossLanesIntrinsic(), combineSVEReductionFP(), combineSVEReductionInt(), combineSVEReductionOrderedFP(), convertMergedOpToPredOp(), llvm::TargetLowering::DAGCombinerInfo::DAG, llvm::AArch64ISD::EORV_PRED, llvm::AArch64ISD::FADDA_PRED, llvm::AArch64ISD::FADDV_PRED, llvm::AArch64CC::FIRST_ACTIVE, llvm::ISD::FMAXIMUM, llvm::AArch64ISD::FMAXNMV_PRED, llvm::ISD::FMAXNUM, llvm::AArch64ISD::FMAXV_PRED, llvm::ISD::FMINIMUM, llvm::AArch64ISD::FMINNMV_PRED, llvm::ISD::FMINNUM, llvm::AArch64ISD::FMINV_PRED, llvm::SelectionDAG::getCondCode(), llvm::getIntrinsicID(), llvm::SelectionDAG::getNode(), getPTest(), llvm::MVT::i64, llvm::AArch64CC::LAST_ACTIVE, LowerSVEIntrinsicDUP(), LowerSVEIntrinsicEXT(), LowerSVEIntrinsicIndex(), N, llvm::AArch64ISD::ORV_PRED, llvm::AArch64ISD::SADDV, llvm::AArch64ISD::SADDV_PRED, llvm::AArch64ISD::SETCC_MERGE_ZERO, llvm::ISD::SETEQ, llvm::ISD::SETGE, llvm::ISD::SETGT, llvm::ISD::SETLE, llvm::ISD::SETLT, llvm::ISD::SETNE, llvm::ISD::SETUGE, llvm::ISD::SETUGT, llvm::ISD::SETULE, llvm::ISD::SETULT, llvm::AArch64ISD::SHL_PRED, llvm::AArch64ISD::SMAX_PRED, llvm::AArch64ISD::SMAXV, llvm::AArch64ISD::SMAXV_PRED, llvm::AArch64ISD::SMIN_PRED, llvm::AArch64ISD::SMINV, llvm::AArch64ISD::SMINV_PRED, llvm::ISD::SPLAT_VECTOR, llvm::AArch64ISD::SRA_PRED, llvm::AArch64ISD::SRL_PRED, tryCombineCRC32(), tryCombineFixedPointConvert(), tryCombineLongOpWithDup(), tryCombineShiftImm(), tryConvertSVEWideCompare(), llvm::AArch64ISD::UADDV, llvm::AArch64ISD::UADDV_PRED, llvm::AArch64ISD::UMAX_PRED, llvm::AArch64ISD::UMAXV, llvm::AArch64ISD::UMAXV_PRED, llvm::AArch64ISD::UMIN_PRED, llvm::AArch64ISD::UMINV, llvm::AArch64ISD::UMINV_PRED, and llvm::ISD::VSELECT.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performIntToFpCombine()

static SDValue performIntToFpCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performLD1Combine()

static SDValue performLD1Combine ( SDNode N,
SelectionDAG DAG,
unsigned  Opc 
)
static

◆ performLD1ReplicateCombine()

template<unsigned Opcode>
static SDValue performLD1ReplicateCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performLDNT1Combine()

static SDValue performLDNT1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performMulCombine()

static SDValue performMulCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performMulVectorExtendCombine()

static SDValue performMulVectorExtendCombine ( SDNode *Mul,
SelectionDAG &DAG 
)
static

Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup)), making use of the vector SExt/ZExt rather than the scalar SExt/ZExt.

Definition at line 12159 of file AArch64ISelLowering.cpp.

References DL, llvm::SelectionDAG::getNode(), llvm::Mul, and performCommonVectorExtendCombine().

Referenced by performMulCombine().
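
The rewrite is sound because sign- or zero-extending a scalar and then duplicating it yields the same lanes as duplicating first and extending each lane. A small standalone C++ check of that equivalence for the sign-extend case (illustrative only; it uses no LLVM APIs):

#include <array>
#include <cassert>
#include <cstdint>

int main() {
  for (int V = -32768; V <= 32767; ++V) {
    int16_t X = (int16_t)V;

    // dup(sext(X)): extend the scalar, then splat it.
    std::array<int32_t, 4> DupOfSext;
    DupOfSext.fill((int32_t)X);

    // sext(dup(X)): splat the scalar, then extend each lane.
    std::array<int16_t, 4> Dup;
    Dup.fill(X);
    std::array<int32_t, 4> SextOfDup;
    for (size_t I = 0; I < Dup.size(); ++I)
      SextOfDup[I] = (int32_t)Dup[I];

    // Identical lanes, so the mul operand can use the vector extend instead.
    assert(DupOfSext == SextOfDup);
  }
  return 0;
}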

◆ performNEONPostLDSTCombine()

static SDValue performNEONPostLDSTCombine ( SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG 
)
static

Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates.

Definition at line 14704 of file AArch64ISelLowering.cpp.

References llvm::ISD::ADD, Addr, llvm::TargetLowering::DAGCombinerInfo::CombineTo(), llvm::SelectionDAG::getMemIntrinsicNode(), llvm::MemSDNode::getMemOperand(), llvm::MemSDNode::getMemoryVT(), llvm::SDValue::getNode(), llvm::User::getOperand(), llvm::SelectionDAG::getRegister(), llvm::EVT::getSizeInBits(), llvm::EVT::getVectorNumElements(), llvm::SelectionDAG::getVTList(), llvm::SDNode::hasPredecessorHelper(), i, llvm::MVT::i64, llvm::SmallPtrSetImpl< PtrType >::insert(), llvm::TargetLowering::DAGCombinerInfo::isBeforeLegalize(), llvm::TargetLowering::DAGCombinerInfo::isCalledByLegalizer(), llvm::AArch64ISD::LD1x2post, llvm::AArch64ISD::LD1x3post, llvm::AArch64ISD::LD1x4post, llvm::AArch64ISD::LD2DUPpost, llvm::AArch64ISD::LD2LANEpost, llvm::AArch64ISD::LD2post, llvm::AArch64ISD::LD3DUPpost, llvm::AArch64ISD::LD3LANEpost, llvm::AArch64ISD::LD3post, llvm::AArch64ISD::LD4DUPpost, llvm::AArch64ISD::LD4LANEpost, llvm::AArch64ISD::LD4post, llvm_unreachable, llvm::makeArrayRef(), n, N, llvm::MVT::Other, llvm::AArch64ISD::ST1x2post, llvm::AArch64ISD::ST1x3post, llvm::AArch64ISD::ST1x4post, llvm::AArch64ISD::ST2LANEpost, llvm::AArch64ISD::ST2post, llvm::AArch64ISD::ST3LANEpost, llvm::AArch64ISD::ST3post, llvm::AArch64ISD::ST4LANEpost, and llvm::AArch64ISD::ST4post.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performNVCASTCombine()

static SDValue performNVCASTCombine ( SDNode *N)
static

Get rid of unnecessary NVCASTs (that don't change the type).

Definition at line 15403 of file AArch64ISelLowering.cpp.

References N.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().
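
Since NVCAST merely reinterprets a value as another type of the same size, a cast whose result type equals its operand's type is a no-op. A hedged sketch of the combine (illustrative, not quoted from the source):

static SDValue performNVCASTCombineSketch(SDNode *N) {
  // An NVCAST that does not change the type folds to its input.
  if (N->getValueType(0) == N->getOperand(0).getValueType())
    return N->getOperand(0);
  return SDValue();
}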

◆ performORCombine()

static SDValue performORCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ performPostLD1Combine()

static SDValue performPostLD1Combine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
bool  IsLaneOp 
)
static

◆ performScatterStoreCombine()

static SDValue performScatterStoreCombine ( SDNode N,
SelectionDAG DAG,
unsigned  Opcode,
bool  OnlyPackedOffsets = true 
)
static

◆ performSelectCombine()

static SDValue performSelectCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performSetccAddFolding()

static SDValue performSetccAddFolding ( SDNode Op,
SelectionDAG DAG 
)
static

◆ performSignExtendInRegCombine()

static SDValue performSignExtendInRegCombine ( SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG 
)
static

Definition at line 15730 of file AArch64ISelLowering.cpp.

References assert(), llvm::TargetLowering::DAGCombinerInfo::CombineTo(), DL, EnableCombineMGatherIntrinsics, llvm::MipsISD::Ext, llvm::SelectionDAG::getContext(), llvm::EVT::getDoubleNumVectorElementsVT(), llvm::SDValue::getNode(), llvm::SelectionDAG::getNode(), llvm::SDNode::getNumOperands(), llvm::SDNode::getOpcode(), llvm::SDNode::getOperand(), llvm::SDValue::getValue(), llvm::SDValue::getValueType(), llvm::SelectionDAG::getValueType(), llvm::EVT::getVectorElementType(), llvm::SelectionDAG::getVTList(), llvm::AArch64ISD::GLD1_IMM_MERGE_ZERO, llvm::AArch64ISD::GLD1_MERGE_ZERO, llvm::AArch64ISD::GLD1_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1S_IMM_MERGE_ZERO, llvm::AArch64ISD::GLD1S_MERGE_ZERO, llvm::AArch64ISD::GLD1S_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1S_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLD1S_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_IMM_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_IMM_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO, llvm::AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO, llvm::AArch64ISD::GLDNT1_MERGE_ZERO, llvm::AArch64ISD::GLDNT1S_MERGE_ZERO, llvm::SDValue::hasOneUse(), I, llvm::MVT::i16, llvm::MVT::i32, llvm::MVT::i8, llvm::TargetLowering::DAGCombinerInfo::isBeforeLegalizeOps(), llvm::AArch64ISD::LD1_MERGE_ZERO, llvm::AArch64ISD::LD1S_MERGE_ZERO, llvm::AArch64ISD::LDFF1_MERGE_ZERO, llvm::AArch64ISD::LDFF1S_MERGE_ZERO, llvm::AArch64ISD::LDNF1_MERGE_ZERO, llvm::AArch64ISD::LDNF1S_MERGE_ZERO, N, llvm::MVT::Other, llvm::ISD::SIGN_EXTEND_INREG, llvm::AArch64ISD::SUNPKHI, llvm::AArch64ISD::SUNPKLO, llvm::AArch64ISD::UUNPKHI, and llvm::AArch64ISD::UUNPKLO.

Referenced by llvm::AArch64TargetLowering::PerformDAGCombine().

◆ performSRLCombine()

static SDValue performSRLCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performST1Combine()

static SDValue performST1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performStepVectorCombine()

static SDValue performStepVectorCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performSTNT1Combine()

static SDValue performSTNT1Combine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performSTORECombine()

static SDValue performSTORECombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ performSVEAndCombine()

static SDValue performSVEAndCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ performTBISimplification()

static bool performTBISimplification ( SDValue  Addr,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performTBZCombine()

static SDValue performTBZCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performUADDVCombine()

static SDValue performUADDVCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performUzpCombine()

static SDValue performUzpCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performVecReduceAddCombine()

static SDValue performVecReduceAddCombine ( SDNode N,
SelectionDAG DAG,
const AArch64Subtarget ST 
)
static

◆ performVectorCompareAndMaskUnaryOpCombine()

static SDValue performVectorCompareAndMaskUnaryOpCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performVectorTruncateCombine()

static SDValue performVectorTruncateCombine ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ performVSelectCombine()

static SDValue performVSelectCombine ( SDNode N,
SelectionDAG DAG 
)
static

◆ performXorCombine()

static SDValue performXorCombine ( SDNode N,
SelectionDAG DAG,
TargetLowering::DAGCombinerInfo DCI,
const AArch64Subtarget Subtarget 
)
static

◆ removeRedundantInsertVectorElt()

static SDValue removeRedundantInsertVectorElt ( SDNode N)
static

◆ ReplaceBITCASTResults()

static void ReplaceBITCASTResults ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG 
)
static

◆ ReplaceCMP_SWAP_128Results()

static void ReplaceCMP_SWAP_128Results ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ ReplaceReductionResults()

static void ReplaceReductionResults ( SDNode N,
SmallVectorImpl< SDValue > &  Results,
SelectionDAG DAG,
unsigned  InterOp,
unsigned  AcrossOp 
)
static

◆ replaceSplatVectorStore()

static SDValue replaceSplatVectorStore ( SelectionDAG &DAG,
StoreSDNode &St 
)
static

Replace a vector store of a scalar splat with scalar stores of the splatted value.

The load/store optimizer pass will merge them into store-pair stores. This has better performance than a splat of the scalar followed by a split vector store. Even if the stores are not merged, it is four stores vs. a dup followed by an ext.b and two stores.

Definition at line 14358 of file AArch64ISelLowering.cpp.

References llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::StoreSDNode::getValue(), llvm::SDValue::getValueType(), llvm::EVT::getVectorNumElements(), llvm::ConstantSDNode::getZExtValue(), I, llvm::ISD::INSERT_VECTOR_ELT, llvm::EVT::isFloatingPoint(), llvm::StoreSDNode::isTruncatingStore(), and splitStoreSplat().

Referenced by splitStores().

◆ replaceZeroVectorStore()

static SDValue replaceZeroVectorStore ( SelectionDAG &DAG,
StoreSDNode &St 
)
static

Replace a vector store of a zero splat with scalar stores of WZR/XZR.

The load/store optimizer pass will merge them into store-pair stores. This should be better than a movi to create the vector zero followed by a vector store if the zero constant is not re-used, since one instruction and one register live range will be removed.

For example, the final generated code should be:

stp xzr, xzr, [x0]

instead of:

movi v0.2d, #0
str q0, [x0]

Definition at line 14291 of file AArch64ISelLowering.cpp.

References llvm::ISD::BUILD_VECTOR, DL, llvm::StoreSDNode::getBasePtr(), llvm::SDNode::getConstantOperandVal(), llvm::SelectionDAG::getCopyFromReg(), llvm::SelectionDAG::getEntryNode(), llvm::SDValue::getOpcode(), llvm::SDValue::getOperand(), llvm::EVT::getSizeInBits(), llvm::StoreSDNode::getValue(), llvm::SDValue::getValueType(), llvm::EVT::getVectorElementType(), llvm::EVT::getVectorNumElements(), llvm::SDValue::hasOneUse(), I, llvm::MVT::i32, llvm::MVT::i64, llvm::SelectionDAG::isBaseWithConstantOffset(), llvm::isNullConstant(), llvm::isNullFPConstant(), llvm::EVT::isScalableVector(), llvm::StoreSDNode::isTruncatingStore(), Offset, and splitStoreSplat().

Referenced by splitStores().

◆ resolveBuildVector()

static bool resolveBuildVector ( BuildVectorSDNode BVN,
APInt CnstBits,
APInt UndefBits 
)
static

◆ selectGatherScatterAddrMode()

void selectGatherScatterAddrMode ( SDValue BasePtr,
SDValue Index,
EVT  MemVT,
unsigned &  Opcode,
bool  IsGather,
SelectionDAG DAG 
)

◆ setInfoSVEStN()

template<unsigned NumVecs>
static bool setInfoSVEStN ( const AArch64TargetLowering TLI,
const DataLayout DL,
AArch64TargetLowering::IntrinsicInfo &  Info,
const CallInst CI 
)
static

◆ skipExtensionForVectorMULL()

static SDValue skipExtensionForVectorMULL ( SDNode N,
SelectionDAG DAG 
)
static

◆ splitInt128()

static std::pair<SDValue, SDValue> splitInt128 ( SDValue  N,
SelectionDAG DAG 
)
static

◆ splitStores()

static SDValue splitStores ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG,
const AArch64Subtarget Subtarget 
)
static

◆ splitStoreSplat()

static SDValue splitStoreSplat ( SelectionDAG DAG,
StoreSDNode St,
SDValue  SplatVal,
unsigned  NumVecElts 
)
static

◆ STATISTIC() [1/3]

STATISTIC ( NumOptimizedImms  ,
"Number of times immediates were optimized"   
)

◆ STATISTIC() [2/3]

STATISTIC ( NumShiftInserts  ,
"Number of vector shift inserts"   
)

◆ STATISTIC() [3/3]

STATISTIC ( NumTailCalls  ,
"Number of tail calls  
)

◆ tryAdvSIMDModImm16()

static SDValue tryAdvSIMDModImm16 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits,
const SDValue LHS = nullptr 
)
static

◆ tryAdvSIMDModImm32()

static SDValue tryAdvSIMDModImm32 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits,
const SDValue LHS = nullptr 
)
static

◆ tryAdvSIMDModImm321s()

static SDValue tryAdvSIMDModImm321s ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryAdvSIMDModImm64()

static SDValue tryAdvSIMDModImm64 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryAdvSIMDModImm8()

static SDValue tryAdvSIMDModImm8 ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryAdvSIMDModImmFP()

static SDValue tryAdvSIMDModImmFP ( unsigned  NewOp,
SDValue  Op,
SelectionDAG DAG,
const APInt Bits 
)
static

◆ tryCombineCRC32()

static SDValue tryCombineCRC32 ( unsigned  Mask,
SDNode N,
SelectionDAG DAG 
)
static

◆ tryCombineFixedPointConvert()

static SDValue tryCombineFixedPointConvert ( SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryCombineLongOpWithDup()

static SDValue tryCombineLongOpWithDup ( unsigned  IID,
SDNode N,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryCombineShiftImm()

static SDValue tryCombineShiftImm ( unsigned  IID,
SDNode N,
SelectionDAG DAG 
)
static

◆ tryCombineToBSL()

static SDValue tryCombineToBSL ( SDNode N,
TargetLowering::DAGCombinerInfo DCI 
)
static

◆ tryCombineToEXTR()

static SDValue tryCombineToEXTR ( SDNode *N,
TargetLowering::DAGCombinerInfo &DCI 
)
static

The EXTR instruction extracts a contiguous chunk of bits from two existing registers viewed as a high/low pair.

This function looks for the pattern: (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an EXTR. Can't quite be done in TableGen because the two immediates aren't independent.

Definition at line 12542 of file AArch64ISelLowering.cpp.

References assert(), llvm::TargetLowering::DAGCombinerInfo::DAG, DL, llvm::AArch64ISD::EXTR, findEXTRHalf(), llvm::SelectionDAG::getConstant(), llvm::SelectionDAG::getNode(), llvm::EVT::getSizeInBits(), llvm::MVT::i32, llvm::MVT::i64, N, llvm::ISD::OR, and std::swap().

Referenced by performORCombine().
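
The underlying identity is purely scalar: for 0 < N < RegWidth, (VAL1 << N) | (VAL2 >> (RegWidth-N)) is the RegWidth-bit window taken from the concatenation VAL1:VAL2 starting at bit RegWidth-N of VAL2, which is exactly what a single EXTR computes. A standalone C++ check of the 64-bit case (illustrative only; it uses no LLVM APIs):

#include <cassert>
#include <cstdint>

// The OR-of-shifts pattern the combine matches.
static uint64_t orOfShifts(uint64_t Val1, uint64_t Val2, unsigned N) {
  return (Val1 << N) | (Val2 >> (64 - N));
}

// What EXTR Xd, Xn, Xm, #lsb computes: the low 64 bits of (Xn:Xm) >> lsb.
static uint64_t extrLike(uint64_t Hi, uint64_t Lo, unsigned Lsb) {
  return (Lo >> Lsb) | (Hi << (64 - Lsb));
}

int main() {
  uint64_t A = 0x0123456789abcdefULL, B = 0xfedcba9876543210ULL;
  for (unsigned N = 1; N < 64; ++N)
    assert(orOfShifts(A, B, N) == extrLike(A, B, 64 - N));
  return 0;
}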

◆ tryConvertSVEWideCompare()

static SDValue tryConvertSVEWideCompare ( SDNode N,
ISD::CondCode  CC,
TargetLowering::DAGCombinerInfo DCI,
SelectionDAG DAG 
)
static

◆ tryExtendDUPToExtractHigh()

static SDValue tryExtendDUPToExtractHigh ( SDValue  N,
SelectionDAG DAG 
)
static

◆ tryFormConcatFromShuffle()

static SDValue tryFormConcatFromShuffle ( SDValue  Op,
SelectionDAG DAG 
)
static

◆ tryLowerToSLI()

static SDValue tryLowerToSLI ( SDNode N,
SelectionDAG DAG 
)
static

◆ UseTlsOffset()

static Value* UseTlsOffset ( IRBuilder<> &  IRB,
unsigned  Offset 
)
static

◆ WidenVector()

static SDValue WidenVector ( SDValue  V64Reg,
SelectionDAG DAG 
)
static

Variable Documentation

◆ EnableAArch64ELFLocalDynamicTLSGeneration

cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))

◆ EnableCombineMGatherIntrinsics

static cl::opt<bool> EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden, cl::desc("Combine extends of AArch64 masked " "gather intrinsics"), cl::init(true))

◆ EnableOptimizeLogicalImm

static cl::opt<bool> EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden, cl::desc("Enable AArch64 logical imm instruction " "optimization"), cl::init(true))

◆ MVT_CC

static const MVT MVT_CC = MVT::i32

Value type used for condition codes.

Definition at line 127 of file AArch64ISelLowering.cpp.

Referenced by emitComparison(), emitConditionalComparison(), and getAArch64Cmp().
