18 #ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19 #define LLVM_CODEGEN_SELECTIONDAGNODES_H
62 class MachineBasicBlock;
63 class MachineConstantPoolValue;
96 bool BuildVectorOnly = false);
102 bool BuildVectorOnly = false);
159 return Node == O.Node && ResNo == O.ResNo;
165 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 explicit operator bool() const {
168 return Node != nullptr;
211 inline void dump() const;
213 inline void dumpr() const;
222 unsigned Depth = 2) const;
245 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
283 SDUse **Prev = nullptr;
284 SDUse *Next = nullptr;
292 operator const SDValue&() const { return Val; }
339 inline void setInitial(const SDValue &V);
342 inline void setNode(SDNode *N);
346 if (Next) Next->Prev = &Next;
351 void removeFromList() {
353 if (Next) Next->Prev = Prev;
372 bool NoUnsignedWrap : 1;
373 bool NoSignedWrap : 1;
377 bool NoSignedZeros : 1;
378 bool AllowReciprocal : 1;
379 bool AllowContract : 1;
380 bool ApproximateFuncs : 1;
381 bool AllowReassociation : 1;
395 AllowContract(false), ApproximateFuncs(false),
396 AllowReassociation(false), NoFPExcept(false) {}
438 NoUnsignedWrap &= Flags.NoUnsignedWrap;
439 NoSignedWrap &= Flags.NoSignedWrap;
440 Exact &= Flags.Exact;
441 NoNaNs &= Flags.NoNaNs;
442 NoInfs &= Flags.NoInfs;
443 NoSignedZeros &= Flags.NoSignedZeros;
444 AllowReciprocal &= Flags.AllowReciprocal;
445 AllowContract &= Flags.AllowContract;
446 ApproximateFuncs &= Flags.ApproximateFuncs;
447 AllowReassociation &= Flags.AllowReassociation;
448 NoFPExcept &= Flags.NoFPExcept;
464 #if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__))
467 #define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
468 #define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
470 #define BEGIN_TWO_BYTE_PACK()
471 #define END_TWO_BYTE_PACK()
567 #undef BEGIN_TWO_BYTE_PACK
568 #undef END_TWO_BYTE_PACK
589 SDUse *OperandList = nullptr;
593 const EVT *ValueList;
596 SDUse *UseList = nullptr;
599 unsigned short NumOperands = 0;
600 unsigned short NumValues;
613 static const EVT *getValueTypeList(EVT VT);
675 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
676 case ISD::STRICT_##DAGN:
677 #include "llvm/IR/ConstrainedOps.def"
760 assert(Op && "Cannot increment end iterator!");
771 assert(Op && "Cannot dereference end iterator!");
772 return Op->getUser();
781 assert(Op && "Cannot dereference end iterator!");
782 return (unsigned)(Op - Op->getUser()->OperandList);
818 return N->hasPredecessor(this);
839 unsigned int MaxSteps = 0,
840 bool TopologicalPrune = false) {
856 int NId = N->getNodeId();
862 while (!Worklist.empty()) {
864 int MId = M->getNodeId();
866 (MId > 0) && (MId < NId)) {
867 DeferredNodes.push_back(M);
870 for (const SDValue &OpV : M->op_values()) {
873 Worklist.push_back(Op);
879 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
883 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
885 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
899 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
909 assert(Num < NumOperands && "Invalid child # of SDNode!");
910 return OperandList[Num];
922 std::random_access_iterator_tag, SDValue,
923 ptrdiff_t, value_op_iterator *,
924 value_op_iterator *> {
954 if (UI.getUse().get().getValueType() == MVT::Glue)
971 assert(ResNo < NumValues && "Illegal result number!");
972 return ValueList[ResNo];
1020 unsigned depth = 100) const;
1050 unsigned depth = 100) const;
1069 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1070 IROrder(Order), debugLoc(std::move(dl)) {
1074 "NumValues wasn't wide enough for its operands!");
1100 assert(Order >= 0 && "bad IROrder");
1102 DL = I->getDebugLoc();
1116 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
1117 "Invalid result number for the given node!");
1118 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
1122 return Node->getOpcode();
1126 return Node->getValueType(ResNo);
1130 return Node->getNumOperands();
1134 return Node->getOperand(i);
1138 return Node->getConstantOperandVal(i);
1142 return Node->getConstantOperandAPInt(i);
1146 return Node->isTargetOpcode();
1150 return Node->isTargetMemoryOpcode();
1154 return Node->isMachineOpcode();
1158 return Node->getMachineOpcode();
1162 return Node->isUndef();
1166 return !Node->hasAnyUseOfValue(ResNo);
1170 return Node->hasNUsesOfValue(1, ResNo);
1174 return Node->getDebugLoc();
1186 return Node->dumpr();
1190 return Node->dumpr(G);
1195 inline void SDUse::set(const SDValue &V) {
1196 if (Val.getNode()) removeFromList();
1202 inline void SDUse::setInitial(const SDValue &V) {
1207 inline void SDUse::setNode(SDNode *N) {
1208 if (Val.getNode()) removeFromList();
1210 if (N) N->addUse(*this);
1243 unsigned SrcAddrSpace;
1244 unsigned DestAddrSpace;
1248 unsigned SrcAS, unsigned DestAS);
1371 case ISD::VP_SCATTER:
1372 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
1386 switch (N->getOpcode()) {
1414 case ISD::VP_GATHER:
1415 case ISD::VP_SCATTER:
1416 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1417 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
1420 return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
1432 MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
1492 return N->isMemIntrinsic() ||
1494 N->isTargetMemoryOpcode();
1531 assert(isSplat() && "Cannot get splat index for non-splat!");
1547 unsigned NumElems = Mask.size();
1548 for (unsigned i = 0; i != NumElems; ++i) {
1552 else if (idx < (int)NumElems)
1553 Mask[i] = idx + NumElems;
1555 Mask[i] = idx - NumElems;
1582 return Value->getLimitedValue(Limit);
1606 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1610 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1648 return Value->getValueAPF().isExactlyValue(V);
1689 bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1698 bool AllowTruncation = false);
1703 bool AllowUndefs = false,
1704 bool AllowTruncation = false);
1712 bool AllowUndefs = false);
1731 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1739 unsigned TargetFlags;
1791 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1841 unsigned TargetFlags;
1867 unsigned TargetFlags;
1870 Align Alignment, unsigned TF)
1873 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1874 assert(Offset >= 0 && "Offset is too large");
1878 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1879 Align Alignment, unsigned TF)
1882 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1883 assert(Offset >= 0 && "Offset is too large");
1884 Val.MachineCPVal = v;
1885 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1895 return Val.ConstVal;
1900 return Val.MachineCPVal;
1904 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1924 unsigned TargetFlags;
1978 unsigned &SplatBitSize, bool &HasAnyUndefs,
1979 unsigned MinSplatBits = 0,
1989 BitVector *UndefElements = nullptr) const;
2010 BitVector *UndefElements = nullptr) const;
2022 BitVector *UndefElements = nullptr) const;
2032 BitVector *UndefElements = nullptr) const;
2050 BitVector *UndefElements = nullptr) const;
2081 static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2170 unsigned TargetFlags;
2173 int64_t o, unsigned Flags)
2175 BA(ba), Offset(o), TargetFlags(Flags) {}
2211 unsigned TargetFlags;
2216 Symbol(Sym), TargetFlags(TF) {}
2322 assert(readMem() && "Load MachineMemOperand is not a load!");
2412 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
2414 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
2425 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
2427 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
2445 return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
2446 N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE ||
2447 N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
2474 return N->getOpcode() == ISD::VP_LOAD;
2504 return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD;
2540 return N->getOpcode() == ISD::VP_STORE;
2553 VTs, AM, MemVT, MMO) {
2577 return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE;
2706 return !cast<ConstantSDNode>(getScale())->isOne();
2731 return N->getOpcode() == ISD::VP_GATHER ||
2732 N->getOpcode() == ISD::VP_SCATTER;
2748 return N->getOpcode() == ISD::VP_GATHER;
2766 return N->getOpcode() == ISD::VP_SCATTER;
2790 return !cast<ConstantSDNode>(getScale())->isOne();
2901 if (NumMemRefs == 0)
2903 if (NumMemRefs == 1)
2920 return N->isMachineOpcode();
2954 return Operand == x.Operand;
2959 return Node->getOperand(Operand).getNode();
2972 "Cannot compare iterators of two different nodes!");
2973 return Operand - Other.Operand;
3004 using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
3006 GlobalAddressSDNode,
3023 return isa<LoadSDNode>(N) &&
3029 return isa<LoadSDNode>(N) &&
3035 return isa<LoadSDNode>(N) &&
3041 return isa<LoadSDNode>(N) &&
3047 return isa<LoadSDNode>(N) &&
3061 return isa<StoreSDNode>(N) &&
3070 bool AllowUndefs = false);
3079 bool AllowUndefs = false, bool AllowTypeMismatch = false);
3084 unsigned Opc = Op.getOpcode();
3085 return (Op.getResNo() == 1 &&
3094 #endif // LLVM_CODEGEN_SELECTIONDAGNODES_H
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
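A minimal usage sketch (illustrative only, not part of this header); getShiftAmount and N are hypothetical names, and the call assumes operand 1 is known to be a constant:
  // Read a constant shift amount from operand 1 of an ISD::SHL node.
  // Assumes "llvm/CodeGen/SelectionDAGNodes.h" is included.
  uint64_t getShiftAmount(const llvm::SDNode *N) {
    assert(N->getOpcode() == llvm::ISD::SHL && "expected a shift node");
    return N->getConstantOperandVal(1); // asserts if operand 1 is not a ConstantSDNode
  }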
AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT, unsigned SrcAS, unsigned DestAS)
bool isUNINDEXEDStore(const SDNode *N)
Returns true if the specified node is an unindexed store.
bool hasAllowReciprocal() const
static bool classof(const SDNode *N)
const SDValue & getOffset() const
StoreSDNodeBitfields StoreSDNodeBits
int getSplatIndex() const
ISD::LoadExtType getExtensionType() const
void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
const SDValue & getBasePtr() const
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
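A minimal usage sketch (illustrative, not from this header); foldMulByOne, X, and Op are hypothetical names:
  // Fold X * 1 when the multiplier is a scalar constant or a splatted constant one.
  llvm::SDValue foldMulByOne(llvm::SDValue X, llvm::SDValue Op) {
    if (llvm::ConstantSDNode *C = llvm::isConstOrConstSplat(Op))
      if (C->isOne())
        return X;
    return llvm::SDValue(); // no fold
  }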
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
bool isOverflowIntrOpRes(SDValue Op)
Returns true if the specified value is the overflow result from one of the overflow intrinsic nodes.
Represents a use of a SDNode.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
unsigned getSrcAddressSpace() const
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getMask() const
This class is used to represent an MGATHER node.
const SDValue & getOffset() const
static bool classof(const SDNode *N)
This is an optimization pass for GlobalISel generic memory operations.
unsigned getIROrder() const
Return the node ordering.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setNoSignedZeros(bool b)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
const SDValue & operator*() const
const SDValue & getVectorLength() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getVectorLength() const
const SDValue & getMask() const
static bool classof(const SDNode *N)
static SimpleType getSimplifiedValue(SDValue &Val)
void setIROrder(unsigned Order)
Set the node ordering.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
const APInt & getAPIntValue() const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
bool isNegative() const
Return true if the value is negative.
static bool classof(const SDNode *N)
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static bool classof(const SDNode *N)
SDNode * getNode() const
get the SDNode which holds the desired result
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
size_t use_size() const
Return the number of uses of this node.
const SDValue & getOffset() const
bool isIndexScaled() const
@ LIFETIME_START
This corresponds to the llvm.lifetime.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
void setAllowContract(bool b)
void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
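A hedged sketch of a common pattern (restrictFlags, NewN, N0, and N1 are hypothetical names): when one node replaces two, keep only the flags both originals carried.
  // Copy the flags of N0, then intersect with the flags of N1.
  void restrictFlags(llvm::SDNode *NewN, const llvm::SDNode *N0, const llvm::SDNode *N1) {
    NewN->setFlags(N0->getFlags());
    NewN->intersectFlagsWith(N1->getFlags());
  }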
bool atEnd() const
Return true if this iterator is at the end of uses list.
bool isUndef() const
Return true if the referenced node is an UNDEF value.
bool isTargetStrictFPOpcode() const
Test if this node has a target-specific opcode that may raise FP exceptions (in the <target>ISD names...
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
int64_t getOffset() const
static bool isEqual(const SDValue &LHS, const SDValue &RHS)
iterator_range< value_op_iterator > op_values() const
bool isCompareAndSwap() const
Returns true if this SDNode represents cmpxchg atomic operation, false otherwise.
const SDValue & getOffset() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
AtomicOrdering getMergedOrdering() const
Return a single atomic ordering that is at least as strong as both the success and failure orderings ...
static bool classof(const SDNode *N)
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
An SDNode that represents everything that will be needed to construct a MachineInstr.
static bool classof(const SDNode *N)
SDNodeFlags()
Default constructor turns off all optimization flags.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
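A sketch of one way this predicate is used (isNotOfValue, V, and X are hypothetical names; it assumes the usual canonicalization that places the all-ones constant at operand 1):
  // Recognize V == ~X, i.e. an XOR of X with an all-ones constant or splat.
  bool isNotOfValue(llvm::SDValue V, llvm::SDValue X) {
    return llvm::isBitwiseNot(V) && V.getOperand(0) == X;
  }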
static bool classof(const SDNode *N)
pointer operator->() const
static bool classof(const SDNode *N)
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
mmo_iterator memoperands_begin() const
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
This class is used to form a handle around another node that is persistent and is updated across invo...
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
EVT getMemoryVT() const
Return the type of the in-memory value.
const SDValue & getChain() const
static bool classof(const SDNode *N)
Represents one node in the SelectionDAG.
unsigned getTargetFlags() const
size_t operator-(SDNodeIterator Other) const
This class is used to represent ISD::LOAD nodes.
static bool classof(const SDNode *N)
This class provides iterator support for SDUse operands that use a specific SDNode.
static bool classof(const SDNode *N)
const SDValue & getMask() const
bool memoperands_empty() const
Abstract base class for all machine specific constantpool value subclasses.
const SDValue & getOffset() const
static bool classof(const SDNode *N)
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
const SDValue & getBasePtr() const
The instances of the Type class are immutable: once they are created, they are never changed.
const SDValue & getBasePtr() const
A description of a memory reference used in the backend.
CRTP base class for adapting an iterator to a different type.
const SDValue & getBasePtr() const
MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
AlignedCharArrayUnion< AtomicSDNode, TargetIndexSDNode, BlockAddressSDNode, GlobalAddressSDNode, PseudoProbeSDNode > LargestSDNode
A representation of the largest SDNode, for use in sizeof().
This class is used to represent an MSTORE node.
bool isTargetOpcode() const
static Optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
MCSymbol * getLabel() const
static bool classof(const SDNode *N)
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
MemSDNodeBitfields MemSDNodeBits
This is an abstract virtual class for memory operations.
VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding, EVT MemVT, MachineMemOperand *MMO)
void setAllowReassociation(bool b)
void printrFull(raw_ostream &O, const SelectionDAG *G=nullptr) const
Print a SelectionDAG node and all children down to the leaves.
unsigned getOperand() const
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
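A minimal sketch (simplifyOrWithZero and N are hypothetical names) showing how an all-zeros splat can be treated like a scalar zero:
  // x | 0 -> x when the right operand is a BUILD_VECTOR/SPLAT_VECTOR of zeros.
  llvm::SDValue simplifyOrWithZero(llvm::SDNode *N) {
    llvm::SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
    if (llvm::ISD::isConstantSplatVectorAllZeros(RHS.getNode()))
      return LHS;
    return llvm::SDValue(); // no simplification
  }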
bool isExpandingLoad() const
static bool classof(const SDNode *N)
static SimpleType getSimplifiedValue(const SDValue &Val)
static bool classof(const SDNode *N)
void printr(raw_ostream &OS, const SelectionDAG *G=nullptr) const
LLVM_NODISCARD T pop_back_val()
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
bool hasNoNaNs() const
Test if this operation's arguments and results are assumed not-NaN.
std::forward_iterator_tag iterator_category
This is a base class used to represent MGATHER and MSCATTER nodes.
const SDValue & getValue() const
std::ptrdiff_t difference_type
SDNode * operator*() const
Retrieve a pointer to the current user node.
std::forward_iterator_tag iterator_category
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasin...
const_pointer const_iterator
static ChildIteratorType child_end(NodeRef N)
char RawSDNodeBits[sizeof(uint16_t)]
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
T get() const
Returns the value of the specified pointer type.
const SDValue & get() const
If implicit conversion to SDValue doesn't work, the get() method returns the SDValue.
bool isTruncatingStore() const
Return true if the op does a truncation before store.
const SDValue & getValue() const
const ConstantInt * getConstantIntValue() const
const SDValue & getVectorLength() const
unsigned getResNo() const
Convenience function for get().getResNo().
SDVTList getVTList() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX)
static bool classof(const SDNode *N)
bool isUnordered() const
Returns true if the memory operation doesn't imply any ordering constraints on surrounding memory ope...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool hasNoInfs() const
Test if this operation's arguments and results are assumed not-infinite.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
bool isNaN() const
Return true if the value is a NaN.
LLVM Basic Block Representation.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
static SDNodeIterator begin(const SDNode *N)
value_iterator value_begin() const
void setApproximateFuncs(bool b)
const SDValue & getPassThru() const
const SDValue & getPassThru() const
static bool isValueValidForType(EVT VT, const APFloat &Val)
op_iterator op_end() const
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
This base class is used to represent MLOAD and MSTORE nodes.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
Create an SDNode.
const MachinePointerInfo & getPointerInfo() const
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
VPStridedStoreSDNode(unsigned Order, const DebugLoc &DL, SDVTList VTs, ISD::MemIndexedMode AM, bool IsTrunc, bool IsCompressing, EVT MemVT, MachineMemOperand *MMO)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
int64_t getOffset() const
const SDValue & getValue() const
This is the shared class of boolean and integer constants.
An information struct used to provide DenseMap with the various necessary components for a given valu...
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LoadSDNodeBitfields LoadSDNodeBits
void setTruncatingStore(bool Truncating)
unsigned getTargetFlags() const
SDNodeBitfields SDNodeBits
EVT getValueType() const
Return the ValueType of the referenced return value.
bool operator!=(const SDValue &O) const
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getBaseAlign() const
Return the minimum known alignment in bytes of the base address, without the offset.
static bool classof(const SDNode *N)
bool hasOneUse() const
Return true if there is exactly one use of this node.
static bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
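A hedged sketch (chainIsClear, St, and Ld are hypothetical names) of the kind of legality check this supports before reordering two memory operations:
  // True when the store's input chain reaches the load's chain result
  // without crossing side-effecting nodes.
  bool chainIsClear(const llvm::StoreSDNode *St, llvm::LoadSDNode *Ld) {
    llvm::SDValue LoadChain(Ld, 1); // chain output of a normal load is result #1
    return St->getChain().reachesChainWithoutSideEffects(LoadChain);
  }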
static bool classof(const SDNode *N)
void setFlags(SDNodeFlags NewFlags)
bool isDereferenceable() const
bool operator==(const SDValue &O) const
SDNodeIterator operator++(int)
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
AtomicOrdering getFailureOrdering() const
For cmpxchg atomic operations, return the atomic ordering requirements when store does not occur.
static bool classof(const SDNode *N)
MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl, SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT, MachineMemOperand *MMO)
MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
static bool classof(const SDNode *N)
const SDValue & getVectorLength() const
unsigned getIROrder() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
void dumprWithDepth(const SelectionDAG *G=nullptr, unsigned depth=100) const
printrWithDepth to dbgs().
static bool classof(const SDNode *N)
MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
This class is used to represent an VP_SCATTER node.
static bool classof(const SDNode *N)
Define a template that can be specialized by smart pointers to reflect the fact that they are automat...
const Value * getValue() const
Return the contained Value.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
iterator_range< use_iterator > uses()
use_iterator & operator++()
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
static bool classof(const SDNode *N)
ArrayRef< int > getMask() const
SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
bool operator==(const SDNodeIterator &x) const
SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
bool use_empty() const
Return true if there are no uses of this node.
static unsigned getHashValue(const SDValue &Val)
bool isTargetOpcode() const
Test if this node has a target-specific opcode (in the <target>ISD namespace).
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
const SDValue & getBasePtr() const
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
This class implements an extremely fast bulk output stream that can only output to a stream.
ConstantFP - Floating Point Values [float, double].
const SDValue & getValue() const
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isMinSignedValue() const
MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
value_op_iterator(SDUse *U=nullptr)
static bool classof(const SDNode *N)
@ SMULO
Same for multiplication.
const SDValue & getVectorLength() const
bool isNonTemporal() const
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
static bool classof(const SDNode *N)
const GlobalValue * getGlobal() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool hasNoSignedZeros() const
Test if this operation can ignore the sign of zero.
This struct is a compact representation of a valid (non-zero power of two) alignment.
static SDValue getEmptyKey()
uint64_t getConstantOperandVal(unsigned i) const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOffset() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
bool isTruncatingStore() const
Return true if the op does a truncation before store.
unsigned getTargetFlags() const
void setDebugLoc(DebugLoc dl)
Set source location info.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
ArrayRef< MachineMemOperand * > memoperands() const
Iterator for directly iterating over the operand SDValue's.
static bool classof(const SDNode *N)
SDNode * getGluedUser() const
If this node has a glue value with a user, return the user (there is at most one).
MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTrunc)
const SDValue & getOffset() const
SDNodeIterator & operator++()
void checkForCycles(const SelectionDAG *DAG, bool force=false)
An SDNode that records if a register contains a value that is guaranteed to be aligned accordingly.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
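A minimal sketch (allLanesPowerOf2 and Op are hypothetical names):
  // True when Op is a constant, or a constant BUILD_VECTOR, in which every
  // element is a power of two.
  bool allLanesPowerOf2(llvm::SDValue Op) {
    return llvm::ISD::matchUnaryPredicate(Op, [](llvm::ConstantSDNode *C) {
      return C->getAPIntValue().isPowerOf2();
    });
  }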
const SDValue & getValue() const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
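A minimal sketch of walking the use list (countResult0Users and N are hypothetical names):
  // Count the users that read result 0 of N, ignoring users of its other
  // results (for example the chain output of a load).
  unsigned countResult0Users(const llvm::SDNode *N) {
    unsigned Count = 0;
    for (llvm::SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
         UI != E; ++UI)
      if (UI.getUse().getResNo() == 0)
        ++Count;
    return Count;
  }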
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
unsigned getNumOperands() const
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
SDUse * getNext() const
Get the next SDUse in the use list.
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
This is an important base class in LLVM.
ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
static bool classof(const SDNode *N)
const SDValue & getScale() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
void refineAlignment(const MachineMemOperand *NewMMO)
Update this MemSDNode's MachineMemOperand information to reflect the alignment of NewMMO,...
static bool classof(const SDNode *N)
This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate the offset and size that ar...
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
static const int FIRST_TARGET_STRICTFP_OPCODE
FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations which cannot raise FP exceptions s...
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
EVT getValueType() const
Convenience function for get().getValueType().
void print_details(raw_ostream &OS, const SelectionDAG *G) const
bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
bool isTargetMemoryOpcode() const
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
This class is used to represent a VP_LOAD node.
bool isTruncatingStore() const
Return true if the op does a truncation before store.
MaybeAlign getMaybeAlignValue() const
bool hasAllowReciprocal() const
Test if this operation can use reciprocal multiply instead of division.
This class contains a discriminated union of information about pointers in memory operands,...
BuildVectorSDNode()=delete
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
const SDValue & getOffset() const
void setNodeId(int Id)
Set unique node id.
const SDValue & getMask() const
const DebugLoc & getDebugLoc() const
bool isIndexScaled() const
const SDValue & getOperand(unsigned Num) const
MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing, EVT MemVT, MachineMemOperand *MMO)
bool isExpandingLoad() const
pointer operator*() const
void setAllowReciprocal(bool b)
static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
std::ptrdiff_t difference_type
ISD::LoadExtType getExtensionType() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
void dump() const
Dump this node, for debugging.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
ArrayRef< MachineMemOperand * >::const_iterator mmo_iterator
uint64_t getScalarValueSizeInBits() const
ConstantSDNodeBitfields ConstantSDNodeBits
#define END_TWO_BYTE_PACK()
int64_t getOffset() const
unsigned getTargetFlags() const
static bool classof(const SDNode *N)
This class is used to represent ISD::STORE nodes.
SDValue getValue(unsigned R) const
bool hasNoUnsignedWrap() const
bool hasAllowReassoc() const
Test if this operation may be simplified with reassociative transforms.
Utility class for floating point operations which can have information about relaxed accuracy require...
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
int64_t getOffset() const
int getNodeId() const
Return the unique node id.
SDLoc(const Instruction *I, int Order)
uint64_t getZExtValue() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
void print(raw_ostream &OS, const SelectionDAG *G=nullptr) const
This class is used to represent an MLOAD node.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
const SDValue & getOffset() const
@ BasicBlock
Various leaf nodes.
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
unsigned getTargetFlags() const
static SDValue getTombstoneKey()
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
bool operator==(const use_iterator &x) const
void dumpr() const
Dump (recursively) this node and its use-def subgraph.
ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
const SDValue & getValue() const
TypeSize getValueSizeInBits(unsigned ResNo) const
Returns MVT::getSizeInBits(getValueType(ResNo)).
AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
bool hasApproxFunc() const
Test if this operation allows approximations of math library functions or intrinsics.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
iterator_range< value_iterator > values() const
Class for arbitrary precision integers.
const SDValue & getBasePtr() const
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl, SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT, MachineMemOperand *MMO)
SDNode * getUser()
This returns the SDNode that contains this Use.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
The address of a basic block.
SDNode * operator->() const
bool isAtomic() const
Return true if the memory operation ordering is Unordered or higher.
const SDValue & getStride() const
int64_t getFrameIndex() const
void print_types(raw_ostream &OS, const SelectionDAG *G) const
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
const BlockAddress * getBlockAddress() const
static bool classof(const SDNode *N)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
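A minimal sketch (getSplatBits and BV are hypothetical names):
  // Extract the splatted constant bits from a BUILD_VECTOR, if it is a splat.
  bool getSplatBits(const llvm::BuildVectorSDNode *BV, llvm::APInt &SplatValue) {
    llvm::APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs);
  }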
static use_iterator use_end()
void intersectWith(const SDNodeFlags Flags)
Clear any flags in this flag set that aren't also set in Flags.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Node - This class is used to maintain the singly linked bucket list in a folding set.
void setNoFPExcept(bool b)
bool hasApproximateFuncs() const
int getMaskElt(unsigned Idx) const
iterator_range< use_iterator > uses() const
const Constant * getConstVal() const
unsigned getMachineOpcode() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This class is used to represent an VP_GATHER node.
bool isIndexSigned() const
MachineBasicBlock * getBasicBlock() const
uint64_t getIndex() const
use_iterator operator++(int)
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const SDValue & getBasePtr() const
const uint32_t * getRegMask() const
const SDValue & getOperand(unsigned i) const
LSBaseSDNodeBitfields LSBaseSDNodeBits
#define BEGIN_TWO_BYTE_PACK()
This class is used to represent EVT's, which are used to parameterize some operations.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
static const char * getIndexedModeName(ISD::MemIndexedMode AM)
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
int64_t getSExtValue() const
@ UNDEF
UNDEF - An undefined node.
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
const SDValue & getBasePtr() const
static bool classof(const SDNode *N)
bool isIndexTypeSigned(MemIndexType IndexType)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
ArrayRef< SDUse > ops() const
SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getMask() const
bool hasAllowContract() const
Test if this operation can be floating-point contracted (FMA).
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
const SDValue & getIndex() const
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Wrapper class representing virtual and physical registers.
const SDValue & getBasePtr() const
static bool classof(const SDNode *N)
unsigned getDestAddressSpace() const
std::string getOperationName(const SelectionDAG *G=nullptr) const
Return the opcode of this operation for printing.
bool operator<(const SDValue &V) const
Convenience function for get().operator<.
bool isMemIntrinsic() const
Test if this node is a memory intrinsic (with valid pointer information).
This is an SDNode representing atomic operations.
void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
bool isVPOpcode() const
Test if this node is a vector predication operation.
const SDValue & getMask() const
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
const SDValue & getScale() const
VPBaseLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &DL, SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT, MachineMemOperand *MMO)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void clearMemRefs()
Clear out the memory reference descriptor list.
const ConstantFP * getConstantFPValue() const
bool operator<(const SDValue &O) const
const SDValue & getBasePtr() const
VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
bool operator!=(const SDNodeIterator &x) const
VPStridedLoadSDNode(unsigned Order, const DebugLoc &DL, SDVTList VTs, ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
unsigned getNumOperands() const
Return the number of values used by this operation.
static NodeRef getEntryNode(SDNode *N)
bool hasNoSignedWrap() const
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
bool isMaxSignedValue() const
static bool classof(const SDNode *N)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
uint64_t value() const
This is a hole in the type system and should not be abused.
MCSymbol * getMCSymbol() const
const MDNode * getMD() const
unsigned getAlignment() const
void printrWithDepth(raw_ostream &O, const SelectionDAG *G=nullptr, unsigned depth=100) const
Print a SelectionDAG node and children up to depth "depth." The given SelectionDAG allows target-spec...
bool isInfinity() const
Return true if the value is an infinity.
bool isExpandingLoad() const
Completely target-dependent object reference.
static bool classof(const SDNode *N)
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MachineMemOperand * MMO
Memory reference information.
const First * getAddrOfPtr1() const
If the union is set to the first pointer type get an address pointing to it.
constexpr unsigned BitWidth
bool operator!=(const use_iterator &x) const
const Constant * ConstVal
mmo_iterator memoperands_end() const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
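A minimal sketch of the multi-result idea (splitLoadResults and Ld are hypothetical names):
  // The loaded value and the output chain are two results of the same LOAD
  // node; each SDValue pairs the node with a result number.
  void splitLoadResults(llvm::LoadSDNode *Ld, llvm::SDValue &Value, llvm::SDValue &OutChain) {
    Value = llvm::SDValue(Ld, 0);    // result 0: the loaded value
    OutChain = llvm::SDValue(Ld, 1); // result 1: the token chain
  }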
SDNode * operator->() const
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
void addUse(SDUse &U)
This method should only be used by the SDUse class.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
bool operator!=(const SDValue &V) const
Convenience function for get().operator!=.
ISD::LoadExtType getExtensionType() const
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
An SDNode that holds an arbitrary LLVM IR Value.
static bool classof(const SDNode *N)
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
static bool classof(const SDNode *N)
These are IR-level optimization flags that may be propagated to SDNodes.
const DebugLoc & getDebugLoc() const
Return the source location info.
AtomicOrdering getMergedOrdering() const
Return a single atomic ordering that is at least as strong as both the success and failure orderings ...
void refineAlignment(const MachineMemOperand *MMO)
Update this MachineMemOperand to reflect the alignment of MMO, if it has a greater alignment.
This is a base class used to represent VP_GATHER and VP_SCATTER nodes.
bool hasAllowReassociation() const
@ SSUBO
Same for subtraction.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
const SDValue & getVal() const
const char * getSymbol() const
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
uint16_t PersistentId
Unique and persistent id per SDNode in the DAG.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
const SDValue & getMask() const