14 #ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H 15 #define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H 23 class X86TargetMachine;
683 bool hasSymbolicDisplacement =
true);
688 bool is64Bit,
bool IsVarArg,
bool GuaranteeTCO);
702 unsigned getJumpTableEncoding()
const override;
703 bool useSoftFloat()
const override;
722 unsigned JTI,
MCContext &Ctx)
const override;
728 unsigned getByValTypeAlignment(
Type *Ty,
742 EVT getOptimalMemOpType(uint64_t
Size,
unsigned DstAlign,
unsigned SrcAlign,
743 bool IsMemset,
bool ZeroMemset,
bool MemcpyStrSrc,
752 bool isSafeMemOpType(
MVT VT)
const override;
756 bool allowsMisalignedMemoryAccesses(
EVT VT,
unsigned AS,
unsigned Align,
758 bool *
Fast)
const override;
768 void LowerOperationWrapper(
SDNode *
N,
790 bool isDesirableToCombineBuildVectorToShuffleTruncate(
797 bool isTypeDesirableForOp(
unsigned Opc,
EVT VT)
const override;
803 bool IsDesirableToPromoteOp(
SDValue Op,
EVT &PVT)
const override;
809 bool ForCodeSize,
unsigned Depth)
const override;
813 bool LegalOperations,
bool ForCodeSize,
814 unsigned Depth)
const override;
821 const char *getTargetNodeName(
unsigned Opcode)
const override;
832 bool isCheapToSpeculateCttz()
const override;
834 bool isCheapToSpeculateCtlz()
const override;
836 bool isCtlzFast()
const override;
858 bool isMaskAndCmp0FoldingBeneficial(
const Instruction &AndI)
const override;
860 bool hasAndNotCompare(
SDValue Y)
const override;
862 bool hasAndNot(
SDValue Y)
const override;
866 bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
868 unsigned OldShiftOpcode,
unsigned NewShiftOpcode,
871 bool shouldFoldConstantShiftPairToMask(
const SDNode *N,
874 bool shouldFoldMaskToVariableShiftPair(
SDValue Y)
const override;
878 unsigned KeptBits)
const override {
883 auto VTIsOk = [](
EVT VT) ->
bool {
891 return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
896 bool shouldSplatInsEltVarIndex(
EVT VT)
const override;
903 MVT hasFastEqualityCompare(
unsigned NumBits)
const override;
907 EVT VT)
const override;
909 bool targetShrinkDemandedConstant(
SDValue Op,
const APInt &Demanded,
914 void computeKnownBitsForTargetNode(
const SDValue Op,
916 const APInt &DemandedElts,
918 unsigned Depth = 0)
const override;
921 unsigned ComputeNumSignBitsForTargetNode(
SDValue Op,
922 const APInt &DemandedElts,
924 unsigned Depth)
const override;
926 bool SimplifyDemandedVectorEltsForTargetNode(
SDValue Op,
927 const APInt &DemandedElts,
931 unsigned Depth)
const override;
933 bool SimplifyDemandedBitsForTargetNode(
SDValue Op,
935 const APInt &DemandedElts,
938 unsigned Depth)
const override;
940 SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
950 bool ExpandInlineAsm(
CallInst *CI)
const override;
958 const char *constraint)
const override;
960 const char *LowerXConstraint(
EVT ConstraintVT)
const override;
965 void LowerAsmOperandForConstraint(
SDValue Op,
966 std::string &Constraint,
967 std::vector<SDValue> &Ops,
972 if (ConstraintCode ==
"i")
974 else if (ConstraintCode ==
"o")
976 else if (ConstraintCode ==
"v")
978 else if (ConstraintCode ==
"X")
992 std::pair<unsigned, const TargetRegisterClass *>
999 Type *Ty,
unsigned AS,
1006 bool isLegalICmpImmediate(int64_t Imm)
const override;
1012 bool isLegalAddImmediate(int64_t Imm)
const override;
1014 bool isLegalStoreImmediate(int64_t Imm)
const override;
1022 unsigned AS)
const override;
1024 bool isVectorShiftByScalarCheap(
Type *Ty)
const override;
1027 bool isBinOp(
unsigned Opcode)
const override;
1030 bool isCommutativeBinOp(
unsigned Opcode)
const override;
1035 bool isTruncateFree(
Type *Ty1,
Type *Ty2)
const override;
1036 bool isTruncateFree(
EVT VT1,
EVT VT2)
const override;
1038 bool allowTruncateForTailCall(
Type *Ty1,
Type *Ty2)
const override;
1048 bool isZExtFree(
Type *Ty1,
Type *Ty2)
const override;
1049 bool isZExtFree(
EVT VT1,
EVT VT2)
const override;
1050 bool isZExtFree(
SDValue Val,
EVT VT2)
const override;
1054 bool isVectorLoadExtDesirable(
SDValue)
const override;
1059 bool isFMAFasterThanFMulAndFAdd(
EVT VT)
const override;
1064 bool isNarrowingProfitable(
EVT VT1,
EVT VT2)
const override;
1072 unsigned Intrinsic)
const override;
1077 bool isFPImmLegal(
const APFloat &Imm,
EVT VT,
1078 bool ForCodeSize)
const override;
1092 bool areJTsAllowed(
const Function *Fn)
const override;
1101 return !X86ScalarSSEf64 || VT ==
MVT::f80;
1107 EVT NewVT)
const override;
1112 return (VT ==
MVT::f64 && X86ScalarSSEf64) ||
1113 (VT ==
MVT::f32 && X86ScalarSSEf32);
1118 bool shouldConvertConstantLoadToIntImm(
const APInt &Imm,
1119 Type *Ty)
const override;
1121 bool reduceSelectOfFPConstantLoads(
EVT CmpOpVT)
const override;
1123 bool convertSelectOfConstantsToMath(
EVT VT)
const override;
1128 bool shouldUseStrictFP_TO_INT(
EVT FpVT,
EVT IntVT,
1129 bool IsSigned)
const override;
1133 bool isExtractSubvectorCheap(
EVT ResVT,
EVT SrcVT,
1134 unsigned Index)
const override;
1139 bool shouldScalarizeBinop(
SDValue)
const override;
1150 bool shouldFormOverflowOp(
unsigned Opcode,
EVT VT)
const override;
1153 unsigned AddrSpace)
const override {
1159 bool isLoadBitCastBeneficial(
EVT LoadVT,
EVT BitcastVT,
1168 Register getRegisterByName(
const char* RegName,
EVT VT,
1174 getExceptionPointerRegister(
const Constant *PersonalityFn)
const override;
1179 getExceptionSelectorRegister(
const Constant *PersonalityFn)
const override;
1181 virtual bool needsFixedCatchObjects()
const override;
1192 bool useLoadStackGuardNode()
const override;
1193 bool useStackGuardXorFP()
const override;
1194 void insertSSPDeclarations(
Module &M)
const override;
1195 Value *getSDagStackGuard(
const Module &M)
const override;
1198 const SDLoc &DL)
const override;
1209 bool isNoopAddrSpaceCast(
unsigned SrcAS,
unsigned DestAS)
const override;
1215 EVT VT)
const override;
1217 unsigned getNumRegistersForCallingConv(
LLVMContext &Context,
1219 EVT VT)
const override;
1221 unsigned getVectorTypeBreakdownForCallingConv(
1223 unsigned &NumIntermediates,
MVT &RegisterVT)
const override;
1227 bool supportSwiftError()
const override;
1239 bool lowerInterleavedLoad(
LoadInst *LI,
1242 unsigned Factor)
const override;
1247 unsigned Factor)
const override;
1254 std::pair<const TargetRegisterClass *, uint8_t>
1256 MVT VT)
const override;
1266 bool X86ScalarSSEf32;
1267 bool X86ScalarSSEf64;
1270 std::vector<APFloat> LegalFPImmediates;
1274 void addLegalFPImmediate(
const APFloat& Imm) {
1275 LegalFPImmediates.push_back(Imm);
1301 bool isCalleeStructRet,
1302 bool isCallerStructRet,
1309 SDValue Chain,
bool IsTailCall,
1310 bool Is64Bit,
int FPDiff,
1311 const SDLoc &dl)
const;
1313 unsigned GetAlignedArgumentStackSize(
unsigned StackSize,
1316 unsigned getAddressSpace(
void)
const;
1325 unsigned getGlobalWrapperKind(
const GlobalValue *GV =
nullptr,
1326 const unsigned char OpFlags = 0)
const;
1336 bool ForCall)
const;
1389 void insertCopiesSplitCSR(
1393 bool isUsedByReturnOnly(
SDNode *N,
SDValue &Chain)
const override;
1395 bool mayBeEmittedAsTailCall(
const CallInst *CI)
const override;
1408 shouldExpandAtomicLoadInIR(
LoadInst *SI)
const override;
1409 bool shouldExpandAtomicStoreInIR(
StoreInst *SI)
const override;
1411 shouldExpandAtomicRMWInIR(
AtomicRMWInst *AI)
const override;
1414 lowerIdempotentRMWIntoFencedLoad(
AtomicRMWInst *AI)
const override;
1416 bool lowerAtomicStoreAsStoreSDNode(
const StoreInst &SI)
const override;
1417 bool lowerAtomicLoadAsLoadSDNode(
const LoadInst &LI)
const override;
1419 bool needsCmpXchgNb(
Type *MemType)
const;
1431 EmitVAStartSaveXMMRegsWithCustomInserter(
MachineInstr &BInstr,
1500 int &RefinementSteps,
bool &UseOneConstNR,
1501 bool Reciprocal)
const override;
1505 int &RefinementSteps)
const override;
1508 unsigned combineRepeatedFPDivisors()
const override;
1525 :
MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}
1543 :
MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}
1613 :
MemSDNode(Opc, Order, dl, VTs, MemVT, MMO) {}
1655 template <
typename T =
int>
1658 assert(Mask.
empty() &&
"Expected an empty shuffle mask vector");
1661 for (
int i = 0; i < NumElts; ++i) {
1662 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
1663 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
1664 Pos += (Unary ? 0 : NumElts * (i % 2));
1665 Pos += (Lo ? 0 : NumEltsInLane / 2);
1674 template <
typename T>
1677 assert(0 < Scale &&
"Unexpected scaling factor");
1678 size_t NumElts = Mask.
size();
1679 ScaledMask.
assign(NumElts * Scale, -1);
1681 for (
size_t i = 0; i != NumElts; ++i) {
1686 for (
size_t s = 0; s != Scale; ++s)
1687 ScaledMask[(Scale * i) + s] = M;
1692 for (
size_t s = 0; s != Scale; ++s)
1693 ScaledMask[(Scale * i) + s] = (Scale * M) + s;
1698 #endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
const SDValue & getIndex() const
Double shift instructions.
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
static MVT getIntegerVT(unsigned BitWidth)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
This instruction implements a fp->int store from FP stack slots.
TruncUSStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
BUILTIN_OP_END - This must be the last enum value in this list.
A parsed version of the target data layout string in and methods for querying it. ...
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
Vector comparison generating mask bits for fp and integer signed and unsigned data types...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Repeat move, corresponds to X86::REP_MOVSx.
void createUnpackShuffleMask(MVT VT, SmallVectorImpl< T > &Mask, bool Lo, bool Unary)
Generate unpacklo/unpackhi shuffle mask.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static bool classof(const SDNode *N)
Return with a flag operand.
const SDValue & getValue() const
bool isConstantSplat(SDValue Op, APInt &SplatVal)
If Op is a constant whose elements are all the same constant or undefined, return true and return the...
const SDValue & getBasePtr() const
This class represents lattice values for constants.
const SDValue & getScale() const
bool mergeStoresAfterLegalization(EVT MemVT) const override
Do not merge vector stores after legalization because that may conflict with x86-specific store split...
Compute Double Block Packed Sum-Absolute-Differences.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
A Module instance is used to store all the information related to an LLVM module. ...
static bool classof(const SDNode *N)
Same as call except it adds the NoTrack prefix.
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls...
bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
void push_back(const T &Elt)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class represents a function call, abstracting a target machine's calling convention.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
unsigned getVectorNumElements() const
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
bool isExtractVecEltCheap(EVT VT, unsigned Index) const override
Extract of a scalar FP value from index 0 of a vector is free.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
unsigned const TargetRegisterInfo * TRI
bool isInteger() const
Return true if this is an integer or a vector integer type.
static bool classof(const SDNode *N)
SSE4A Extraction and Insertion.
static bool classof(const SDNode *N)
An instruction for reading from memory.
An instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Bitwise logical ANDNOT of floating point values.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg, bool GuaranteeTCO)
Determines whether the callee is required to pop its own arguments.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
X86MaskedGatherScatterSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
Copies a 64-bit value from an MMX vector to the low word of an XMM vector, with the high word zero fi...
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
X86 compare and logical compare instructions.
MaskedTruncUSStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
Extract an 8-bit value from a vector and zero extend it to i32, corresponds to X86::PEXTRB.
A description of a memory reference used in the backend.
X86StoreSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
Dynamic (non-constant condition) vector blend where only the sign bits of the condition elements are ...
Bitwise Logical AND NOT of Packed FP values.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Base class for the full range of assembler expressions which are needed for parsing.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This instruction implements SINT_TO_FP with the integer source in memory and FP reg result...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Integer horizontal add/sub.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Copies a 64-bit value from the low word of an XMM vector to an MMX vector.
void assign(size_type NumElts, const T &Elt)
Context object for machine code objects.
Copies a 32-bit value from the low word of a MMX vector to a GPR.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
X86 FP SETCC, similar to above, but with output as an i1 mask and a version with SAE...
Return from interrupt. Operand 0 is the number of bytes to pop.
This contains information for each constraint that we are lowering.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
const SDValue & getMask() const
An instruction for storing to memory.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
static bool classof(const SDNode *N)
This instruction implements FP_TO_SINT with the integer destination in memory and a FP reg source...
X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
const SDValue & getBasePtr() const
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
Floating point horizontal add/sub.
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
Analysis containing CSE Info
Bitwise logical XOR of floating point values.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
static bool classof(const SDNode *N)
const SDValue & getMask() const
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
This instruction implements an extending load to FP stack slots.
This is an important class for using LLVM in a threaded context.
Insert any element of a 4 x float vector into any element of a destination 4 x floatvector.
unsigned getScalarSizeInBits() const
size_t size() const
size - Get the array size.
This is an important base class in LLVM.
Repeat fill, corresponds to X86::REP_STOSx.
static bool is64Bit(const char *name)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem, unsigned AddrSpace) const override
Return true if it is expected to be cheaper to do a store of a non-zero vector constant with the give...
X86 conditional branches.
Insert the lower 16-bits of a 32-bit value to a vector, corresponds to X86::PINSRW.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Commutative FMIN and FMAX.
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
On Darwin, this node represents the result of the popl at function entry, used for PIC code...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
const SDValue & getValue() const
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
std::vector< ArgListEntry > ArgListTy
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
This struct is a compact representation of a valid (non-zero power of two) alignment.
These operations represent an abstract X86 call instruction, which includes a bunch of information...
Floating point max and min.
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
TruncSStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
bool isScalarFPTypeInSSEReg(EVT VT) const
Return true if the specified scalar FP type is computed in an SSE register, not on the X87 floating p...
Copies a GPR into the low 32-bit word of a MMX vector and zero out the high word. ...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
X86MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
This is an abstract virtual class for memory operations.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Floating point reciprocal-sqrt and reciprocal approximation.
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
const SDValue & getValue() const
Represents one node in the SelectionDAG.
X86 bit-test instructions.
const Function & getFunction() const
Return the LLVM function that this machine code represents.
static bool classof(const SDNode *N)
MaskedTruncSStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
Class for arbitrary precision integers.
amdgpu Simplify well known AMD library false FunctionCallee Callee
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static bool classof(const SDNode *N)
const char * getClearCacheBuiltinName() const override
Intel processors have a unified instruction and data cache.
Flags
Flags values. These may be or'd together.
bool ShouldShrinkFPConstant(EVT VT) const override
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
Representation of each machine instruction.
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F, const Loop &L)
bool isVector() const
Return true if this is a vector value type.
Insert the lower 8-bits of a 32-bit value to a vector, corresponds to X86::PINSRB.
LLVM_NODISCARD bool empty() const
A wrapper node for TargetConstantPool, TargetJumpTable, TargetExternalSymbol, TargetGlobalAddress, TargetGlobalTLSAddress, MCSymbol and TargetBlockAddress.
Bitwise logical AND of floating point values.
void scaleShuffleMask(size_t Scale, ArrayRef< T > Mask, SmallVectorImpl< T > &ScaledMask)
Helper function to scale a shuffle or target shuffle mask, replacing each mask index with the scaled ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
static bool classof(const SDNode *N)
X86MaskedStoreSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
LOCK-prefixed arithmetic read-modify-write instructions.
Extract a 16-bit value from a vector and zero extend it to i32, corresponds to X86::PEXTRW.
bool hasVectorBlend() const override
Return true if the target has a vector blend instruction.
Blend where the selector is an immediate.
X86MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
This instruction implements a truncating store from FP stack slots.
Combined add and sub on an FP vector.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
This instruction grabs the address of the next argument from a va_list.
LLVM Value Representation.
Bitwise logical OR of floating point values.
const SDValue & getBasePtr() const
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
bool isZeroNode(SDValue Elt)
Returns true if Elt is a constant zero or floating point constant +0.0.
StringRef - Represent a constant reference to a string, i.e.
bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, bool hasSymbolicDisplacement=true)
Returns true of the given offset can be fit into displacement field of the instruction.
const SDValue & getPassThru() const
bool hasBitPreservingFPLogic(EVT VT) const override
Return true if it is safe to transform an integer-domain bitwise operation into the equivalent floati...
Compute Sum of Absolute Differences.
Scalar intrinsic floating point max and min.
MVT getScalarShiftAmountTy(const DataLayout &, EVT VT) const override
EVT is not used in-tree, but is used by out-of-tree target.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
BRIND node with NoTrack prefix.
Shuffle 16 8-bit values within a vector.
Wrapper class representing virtual and physical registers.
This file describes how to lower LLVM code to machine code.
Special wrapper used under X86-64 PIC mode for RIP relative displacements.
This class is used to represent ISD::LOAD nodes.