// Empty out-of-line definition; presumably serves as the vtable anchor for
// DAGNodeDeletedListener so its vtable/RTTI is emitted in exactly one
// translation unit (standard LLVM anchor() idiom) — TODO confirm against the
// class declaration in the header.
99void SelectionDAG::DAGNodeDeletedListener::anchor() {}
// Empty out-of-line definition; presumably the vtable anchor for
// DAGNodeInsertedListener, mirroring DAGNodeDeletedListener's anchor above —
// TODO confirm against the class declaration in the header.
100void SelectionDAG::DAGNodeInsertedListener::anchor() {}
102#define DEBUG_TYPE "selectiondag"
106 cl::desc(
"Gang up loads and stores generated by inlining of memcpy"));
109 cl::desc(
"Number limit for gluing ld/st of memcpy."),
125 return getValueAPF().bitwiseIsEqual(V);
148 N->getValueType(0).getVectorElementType().getSizeInBits();
149 if (
auto *Op0 = dyn_cast<ConstantSDNode>(
N->getOperand(0))) {
150 SplatVal = Op0->getAPIntValue().
trunc(EltSize);
153 if (
auto *Op0 = dyn_cast<ConstantFPSDNode>(
N->getOperand(0))) {
154 SplatVal = Op0->getValueAPF().bitcastToAPInt().
trunc(EltSize);
159 auto *BV = dyn_cast<BuildVectorSDNode>(
N);
164 unsigned SplatBitSize;
166 unsigned EltSize =
N->getValueType(0).getVectorElementType().getSizeInBits();
171 const bool IsBigEndian =
false;
172 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
173 EltSize, IsBigEndian) &&
174 EltSize == SplatBitSize;
183 N =
N->getOperand(0).getNode();
192 unsigned i = 0, e =
N->getNumOperands();
195 while (i != e &&
N->getOperand(i).isUndef())
199 if (i == e)
return false;
210 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
212 if (CN->getAPIntValue().countr_one() < EltSize)
215 if (CFPN->getValueAPF().bitcastToAPInt().countr_one() < EltSize)
223 for (++i; i != e; ++i)
224 if (
N->getOperand(i) != NotZero && !
N->getOperand(i).isUndef())
232 N =
N->getOperand(0).getNode();
241 bool IsAllUndef =
true;
254 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
256 if (CN->getAPIntValue().countr_zero() < EltSize)
259 if (CFPN->getValueAPF().bitcastToAPInt().countr_zero() < EltSize)
286 if (!isa<ConstantSDNode>(
Op))
299 if (!isa<ConstantFPSDNode>(
Op))
307 assert(
N->getValueType(0).isVector() &&
"Expected a vector!");
309 unsigned EltSize =
N->getValueType(0).getScalarSizeInBits();
310 if (EltSize <= NewEltSize)
314 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
319 return (
N->getOperand(0).getValueType().getScalarSizeInBits() <=
329 if (!isa<ConstantSDNode>(
Op))
332 APInt C =
Op->getAsAPIntVal().trunc(EltSize);
333 if (
Signed &&
C.trunc(NewEltSize).sext(EltSize) !=
C)
335 if (!
Signed &&
C.trunc(NewEltSize).zext(EltSize) !=
C)
346 if (
N->getNumOperands() == 0)
352 return N->getOpcode() ==
ISD::FREEZE &&
N->getOperand(0).isUndef();
355template <
typename ConstNodeType>
357 std::function<
bool(ConstNodeType *)>
Match,
360 if (
auto *
C = dyn_cast<ConstNodeType>(
Op))
368 EVT SVT =
Op.getValueType().getScalarType();
370 if (AllowUndefs &&
Op.getOperand(i).isUndef()) {
376 auto *Cst = dyn_cast<ConstNodeType>(
Op.getOperand(i));
377 if (!Cst || Cst->getValueType(0) != SVT || !
Match(Cst))
383template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(
385template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
391 bool AllowUndefs,
bool AllowTypeMismatch) {
392 if (!AllowTypeMismatch &&
LHS.getValueType() !=
RHS.getValueType())
396 if (
auto *LHSCst = dyn_cast<ConstantSDNode>(
LHS))
397 if (
auto *RHSCst = dyn_cast<ConstantSDNode>(
RHS))
398 return Match(LHSCst, RHSCst);
401 if (
LHS.getOpcode() !=
RHS.getOpcode() ||
406 EVT SVT =
LHS.getValueType().getScalarType();
407 for (
unsigned i = 0, e =
LHS.getNumOperands(); i != e; ++i) {
410 bool LHSUndef = AllowUndefs && LHSOp.
isUndef();
411 bool RHSUndef = AllowUndefs && RHSOp.
isUndef();
412 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
413 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
414 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
416 if (!AllowTypeMismatch && (LHSOp.
getValueType() != SVT ||
419 if (!
Match(LHSCst, RHSCst))
426 switch (VecReduceOpcode) {
431 case ISD::VP_REDUCE_FADD:
432 case ISD::VP_REDUCE_SEQ_FADD:
436 case ISD::VP_REDUCE_FMUL:
437 case ISD::VP_REDUCE_SEQ_FMUL:
440 case ISD::VP_REDUCE_ADD:
443 case ISD::VP_REDUCE_MUL:
446 case ISD::VP_REDUCE_AND:
449 case ISD::VP_REDUCE_OR:
452 case ISD::VP_REDUCE_XOR:
455 case ISD::VP_REDUCE_SMAX:
458 case ISD::VP_REDUCE_SMIN:
461 case ISD::VP_REDUCE_UMAX:
464 case ISD::VP_REDUCE_UMIN:
467 case ISD::VP_REDUCE_FMAX:
470 case ISD::VP_REDUCE_FMIN:
483#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
486#include "llvm/IR/VPIntrinsics.def"
494#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
495#define VP_PROPERTY_BINARYOP return true;
496#define END_REGISTER_VP_SDNODE(VPSD) break;
497#include "llvm/IR/VPIntrinsics.def"
506#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
507#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
508#define END_REGISTER_VP_SDNODE(VPSD) break;
509#include "llvm/IR/VPIntrinsics.def"
519#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
522#include "llvm/IR/VPIntrinsics.def"
531#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
534#include "llvm/IR/VPIntrinsics.def"
544#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
545#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
546#define END_REGISTER_VP_SDNODE(VPOPC) break;
547#include "llvm/IR/VPIntrinsics.def"
556#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
557#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
558#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
559#include "llvm/IR/VPIntrinsics.def"
606 bool isIntegerLike) {
631 bool IsInteger =
Type.isInteger();
636 unsigned Op = Op1 | Op2;
652 bool IsInteger =
Type.isInteger();
687 ID.AddPointer(VTList.
VTs);
693 for (
const auto &
Op : Ops) {
694 ID.AddPointer(
Op.getNode());
695 ID.AddInteger(
Op.getResNo());
702 for (
const auto &
Op : Ops) {
703 ID.AddPointer(
Op.getNode());
704 ID.AddInteger(
Op.getResNo());
717 switch (
N->getOpcode()) {
726 ID.AddPointer(
C->getConstantIntValue());
727 ID.AddBoolean(
C->isOpaque());
732 ID.AddPointer(cast<ConstantFPSDNode>(
N)->getConstantFPValue());
748 ID.AddInteger(cast<RegisterSDNode>(
N)->
getReg());
751 ID.AddPointer(cast<RegisterMaskSDNode>(
N)->getRegMask());
754 ID.AddPointer(cast<SrcValueSDNode>(
N)->getValue());
758 ID.AddInteger(cast<FrameIndexSDNode>(
N)->getIndex());
762 if (cast<LifetimeSDNode>(
N)->hasOffset()) {
763 ID.AddInteger(cast<LifetimeSDNode>(
N)->
getSize());
768 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getGuid());
769 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getIndex());
770 ID.AddInteger(cast<PseudoProbeSDNode>(
N)->getAttributes());
774 ID.AddInteger(cast<JumpTableSDNode>(
N)->getIndex());
775 ID.AddInteger(cast<JumpTableSDNode>(
N)->getTargetFlags());
780 ID.AddInteger(CP->getAlign().value());
781 ID.AddInteger(CP->getOffset());
782 if (CP->isMachineConstantPoolEntry())
783 CP->getMachineCPVal()->addSelectionDAGCSEId(
ID);
785 ID.AddPointer(CP->getConstVal());
786 ID.AddInteger(CP->getTargetFlags());
798 ID.AddInteger(LD->getMemoryVT().getRawBits());
799 ID.AddInteger(LD->getRawSubclassData());
800 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
801 ID.AddInteger(LD->getMemOperand()->getFlags());
806 ID.AddInteger(ST->getMemoryVT().getRawBits());
807 ID.AddInteger(ST->getRawSubclassData());
808 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
809 ID.AddInteger(ST->getMemOperand()->getFlags());
820 case ISD::VP_STORE: {
828 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
835 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
842 case ISD::VP_GATHER: {
850 case ISD::VP_SCATTER: {
939 if (
auto *MN = dyn_cast<MemIntrinsicSDNode>(
N)) {
940 ID.AddInteger(MN->getRawSubclassData());
941 ID.AddInteger(MN->getPointerInfo().getAddrSpace());
942 ID.AddInteger(MN->getMemOperand()->getFlags());
943 ID.AddInteger(MN->getMemoryVT().getRawBits());
966 if (
N->getValueType(0) == MVT::Glue)
969 switch (
N->getOpcode()) {
977 for (
unsigned i = 1, e =
N->getNumValues(); i != e; ++i)
978 if (
N->getValueType(i) == MVT::Glue)
995 if (Node.use_empty())
1010 while (!DeadNodes.
empty()) {
1019 DUL->NodeDeleted(
N,
nullptr);
1022 RemoveNodeFromCSEMaps(
N);
1053 RemoveNodeFromCSEMaps(
N);
1057 DeleteNodeNotInCSEMaps(
N);
1060void SelectionDAG::DeleteNodeNotInCSEMaps(
SDNode *
N) {
1061 assert(
N->getIterator() != AllNodes.begin() &&
1062 "Cannot delete the entry node!");
1063 assert(
N->use_empty() &&
"Cannot delete a node that is not dead!");
1072 assert(!(V->isVariadic() && isParameter));
1074 ByvalParmDbgValues.push_back(V);
1076 DbgValues.push_back(V);
1077 for (
const SDNode *Node : V->getSDNodes())
1079 DbgValMap[Node].push_back(V);
1084 if (
I == DbgValMap.end())
1086 for (
auto &Val:
I->second)
1087 Val->setIsInvalidated();
1091void SelectionDAG::DeallocateNode(
SDNode *
N) {
1115 switch (
N->getOpcode()) {
1119 EVT VT =
N->getValueType(0);
1120 assert(
N->getNumValues() == 1 &&
"Too many results!");
1122 "Wrong return type!");
1123 assert(
N->getNumOperands() == 2 &&
"Wrong number of operands!");
1124 assert(
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType() &&
1125 "Mismatched operand types!");
1127 "Wrong operand type!");
1129 "Wrong return type size");
1133 assert(
N->getNumValues() == 1 &&
"Too many results!");
1134 assert(
N->getValueType(0).isVector() &&
"Wrong return type!");
1135 assert(
N->getNumOperands() ==
N->getValueType(0).getVectorNumElements() &&
1136 "Wrong number of operands!");
1137 EVT EltVT =
N->getValueType(0).getVectorElementType();
1139 assert((
Op.getValueType() == EltVT ||
1140 (EltVT.
isInteger() &&
Op.getValueType().isInteger() &&
1141 EltVT.
bitsLE(
Op.getValueType()))) &&
1142 "Wrong operand type!");
1143 assert(
Op.getValueType() ==
N->getOperand(0).getValueType() &&
1144 "Operands must all have the same type");
1156void SelectionDAG::InsertNode(
SDNode *
N) {
1157 AllNodes.push_back(
N);
1159 N->PersistentId = NextPersistentId++;
1162 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1163 DUL->NodeInserted(
N);
1170bool SelectionDAG::RemoveNodeFromCSEMaps(
SDNode *
N) {
1171 bool Erased =
false;
1172 switch (
N->getOpcode()) {
1175 assert(CondCodeNodes[cast<CondCodeSDNode>(
N)->
get()] &&
1176 "Cond code doesn't exist!");
1177 Erased = CondCodeNodes[cast<CondCodeSDNode>(
N)->get()] !=
nullptr;
1178 CondCodeNodes[cast<CondCodeSDNode>(
N)->get()] =
nullptr;
1181 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(
N)->getSymbol());
1185 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1190 auto *MCSN = cast<MCSymbolSDNode>(
N);
1191 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1195 EVT VT = cast<VTSDNode>(
N)->getVT();
1197 Erased = ExtendedValueTypeNodes.erase(VT);
1208 Erased = CSEMap.RemoveNode(
N);
1215 if (!Erased &&
N->getValueType(
N->getNumValues()-1) != MVT::Glue &&
1230SelectionDAG::AddModifiedNodeToCSEMaps(
SDNode *
N) {
1234 SDNode *Existing = CSEMap.GetOrInsertNode(
N);
1235 if (Existing !=
N) {
1242 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1243 DUL->NodeDeleted(
N, Existing);
1244 DeleteNodeNotInCSEMaps(
N);
1250 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1251 DUL->NodeUpdated(
N);
1269 Node->intersectFlagsWith(
N->getFlags());
1289 Node->intersectFlagsWith(
N->getFlags());
1307 Node->intersectFlagsWith(
N->getFlags());
1320 : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0,
DebugLoc(),
1323 InsertNode(&EntryNode);
1334 SDAGISelPass = PassPtr;
1338 LibInfo = LibraryInfo;
1343 FnVarLocs = VarLocs;
1347 assert(!UpdateListeners &&
"Dangling registered DAGUpdateListeners");
1349 OperandRecycler.clear(OperandAllocator);
1358void SelectionDAG::allnodes_clear() {
1359 assert(&*AllNodes.begin() == &EntryNode);
1360 AllNodes.remove(AllNodes.begin());
1361 while (!AllNodes.empty())
1362 DeallocateNode(&AllNodes.front());
1364 NextPersistentId = 0;
1370 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1372 switch (
N->getOpcode()) {
1377 "debug location. Use another overload.");
1384 const SDLoc &
DL,
void *&InsertPos) {
1385 SDNode *
N = CSEMap.FindNodeOrInsertPos(
ID, InsertPos);
1387 switch (
N->getOpcode()) {
1393 if (
N->getDebugLoc() !=
DL.getDebugLoc())
1400 if (
DL.getIROrder() &&
DL.getIROrder() <
N->getIROrder())
1401 N->setDebugLoc(
DL.getDebugLoc());
1410 OperandRecycler.clear(OperandAllocator);
1411 OperandAllocator.
Reset();
1414 ExtendedValueTypeNodes.clear();
1415 ExternalSymbols.clear();
1416 TargetExternalSymbols.clear();
1419 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1421 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1422 static_cast<SDNode*
>(
nullptr));
1424 EntryNode.UseList =
nullptr;
1425 InsertNode(&EntryNode);
1431 return VT.
bitsGT(
Op.getValueType())
1437std::pair<SDValue, SDValue>
1441 "Strict no-op FP extend/round not allowed.");
1448 return std::pair<SDValue, SDValue>(Res,
SDValue(Res.
getNode(), 1));
1452 return VT.
bitsGT(
Op.getValueType()) ?
1458 return VT.
bitsGT(
Op.getValueType()) ?
1464 return VT.
bitsGT(
Op.getValueType()) ?
1472 auto Type =
Op.getValueType();
1476 auto Size =
Op.getValueSizeInBits();
1487 auto Type =
Op.getValueType();
1491 auto Size =
Op.getValueSizeInBits();
1502 auto Type =
Op.getValueType();
1506 auto Size =
Op.getValueSizeInBits();
1524 EVT OpVT =
Op.getValueType();
1526 "Cannot getZeroExtendInReg FP types");
1528 "getZeroExtendInReg type should be vector iff the operand "
1532 "Vector element counts must match in getZeroExtendInReg");
1570 return getNode(ISD::VP_XOR,
DL, VT, Val, TrueValue, Mask, EVL);
1581 return getNode(ISD::VP_ZERO_EXTEND,
DL, VT,
Op, Mask, EVL);
1583 return getNode(ISD::VP_TRUNCATE,
DL, VT,
Op, Mask, EVL);
1603 bool isT,
bool isO) {
1607 "getConstant with a uint64_t value that doesn't fit in the type!");
1612 bool isT,
bool isO) {
1613 return getConstant(*ConstantInt::get(*Context, Val),
DL, VT, isT, isO);
1617 EVT VT,
bool isT,
bool isO) {
1635 Elt = ConstantInt::get(*
getContext(), NewVal);
1654 "Can only handle an even split!");
1658 for (
unsigned i = 0; i != Parts; ++i)
1660 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1661 ViaEltVT, isT, isO));
1666 unsigned ViaVecNumElts = VT.
getSizeInBits() / ViaEltSizeInBits;
1677 NewVal.
extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits),
DL,
1678 ViaEltVT, isT, isO));
1683 std::reverse(EltParts.
begin(), EltParts.
end());
1702 "APInt size does not match type size!");
1710 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1715 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1716 CSEMap.InsertNode(
N, IP);
1733 const SDLoc &
DL,
bool LegalTypes) {
1740 const SDLoc &
DL,
bool LegalTypes) {
1756 EVT VT,
bool isTarget) {
1770 if ((
N = FindNodeOrInsertPos(
ID,
DL, IP)))
1775 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1776 CSEMap.InsertNode(
N, IP);
1790 if (EltVT == MVT::f32)
1792 if (EltVT == MVT::f64)
1794 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1795 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1806 EVT VT, int64_t
Offset,
bool isTargetGA,
1807 unsigned TargetFlags) {
1808 assert((TargetFlags == 0 || isTargetGA) &&
1809 "Cannot set target flags on target-independent globals");
1826 ID.AddInteger(TargetFlags);
1828 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
1831 auto *
N = newSDNode<GlobalAddressSDNode>(
1832 Opc,
DL.getIROrder(),
DL.getDebugLoc(), GV, VT,
Offset, TargetFlags);
1833 CSEMap.InsertNode(
N, IP);
1844 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1847 auto *
N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1848 CSEMap.InsertNode(
N, IP);
1854 unsigned TargetFlags) {
1855 assert((TargetFlags == 0 || isTarget) &&
1856 "Cannot set target flags on target-independent jump tables");
1861 ID.AddInteger(TargetFlags);
1863 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1866 auto *
N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1867 CSEMap.InsertNode(
N, IP);
1881 bool isTarget,
unsigned TargetFlags) {
1882 assert((TargetFlags == 0 || isTarget) &&
1883 "Cannot set target flags on target-independent globals");
1891 ID.AddInteger(Alignment->value());
1894 ID.AddInteger(TargetFlags);
1896 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1899 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VT,
Offset, *Alignment,
1901 CSEMap.InsertNode(
N, IP);
1910 bool isTarget,
unsigned TargetFlags) {
1911 assert((TargetFlags == 0 || isTarget) &&
1912 "Cannot set target flags on target-independent globals");
1918 ID.AddInteger(Alignment->value());
1920 C->addSelectionDAGCSEId(
ID);
1921 ID.AddInteger(TargetFlags);
1923 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1926 auto *
N = newSDNode<ConstantPoolSDNode>(isTarget,
C, VT,
Offset, *Alignment,
1928 CSEMap.InsertNode(
N, IP);
1938 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
1941 auto *
N = newSDNode<BasicBlockSDNode>(
MBB);
1942 CSEMap.InsertNode(
N, IP);
1949 ValueTypeNodes.size())
1956 N = newSDNode<VTSDNode>(VT);
1964 N = newSDNode<ExternalSymbolSDNode>(
false,
Sym, 0, VT);
1973 N = newSDNode<MCSymbolSDNode>(
Sym, VT);
1979 unsigned TargetFlags) {
1981 TargetExternalSymbols[std::pair<std::string, unsigned>(
Sym, TargetFlags)];
1983 N = newSDNode<ExternalSymbolSDNode>(
true,
Sym, TargetFlags, VT);
1989 if ((
unsigned)
Cond >= CondCodeNodes.size())
1990 CondCodeNodes.resize(
Cond+1);
1992 if (!CondCodeNodes[
Cond]) {
1993 auto *
N = newSDNode<CondCodeSDNode>(
Cond);
1994 CondCodeNodes[
Cond] =
N;
2004 "APInt size does not match type size!");
2022 if (EC.isScalable())
2035 const APInt &StepVal) {
2059 "Must have the same number of vector elements as mask elements!");
2061 "Invalid VECTOR_SHUFFLE");
2069 int NElts = Mask.size();
2071 [&](
int M) {
return M < (NElts * 2) && M >= -1; }) &&
2072 "Index out of range");
2080 for (
int i = 0; i != NElts; ++i)
2081 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2097 for (
int i = 0; i < NElts; ++i) {
2098 if (MaskVec[i] <
Offset || MaskVec[i] >= (
Offset + NElts))
2102 if (UndefElements[MaskVec[i] -
Offset]) {
2108 if (!UndefElements[i])
2112 if (
auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
2113 BlendSplat(N1BV, 0);
2114 if (
auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
2115 BlendSplat(N2BV, NElts);
2120 bool AllLHS =
true, AllRHS =
true;
2122 for (
int i = 0; i != NElts; ++i) {
2123 if (MaskVec[i] >= NElts) {
2128 }
else if (MaskVec[i] >= 0) {
2132 if (AllLHS && AllRHS)
2134 if (AllLHS && !N2Undef)
2147 bool Identity =
true, AllSame =
true;
2148 for (
int i = 0; i != NElts; ++i) {
2149 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity =
false;
2150 if (MaskVec[i] != MaskVec[0]) AllSame =
false;
2152 if (Identity && NElts)
2162 V = V->getOperand(0);
2165 if (
auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2185 if (AllSame && SameNumElts) {
2186 EVT BuildVT = BV->getValueType(0);
2202 for (
int i = 0; i != NElts; ++i)
2203 ID.AddInteger(MaskVec[i]);
2206 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2212 int *MaskAlloc = OperandAllocator.
Allocate<
int>(NElts);
2215 auto *
N = newSDNode<ShuffleVectorSDNode>(VT, dl.
getIROrder(),
2217 createOperands(
N, Ops);
2219 CSEMap.InsertNode(
N, IP);
2239 ID.AddInteger(RegNo);
2241 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2244 auto *
N = newSDNode<RegisterSDNode>(RegNo, VT);
2246 CSEMap.InsertNode(
N, IP);
2254 ID.AddPointer(RegMask);
2256 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2259 auto *
N = newSDNode<RegisterMaskSDNode>(RegMask);
2260 CSEMap.InsertNode(
N, IP);
2275 ID.AddPointer(Label);
2277 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2282 createOperands(
N, Ops);
2284 CSEMap.InsertNode(
N, IP);
2290 int64_t
Offset,
bool isTarget,
2291 unsigned TargetFlags) {
2298 ID.AddInteger(TargetFlags);
2300 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2303 auto *
N = newSDNode<BlockAddressSDNode>(Opc, VT, BA,
Offset, TargetFlags);
2304 CSEMap.InsertNode(
N, IP);
2315 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2318 auto *
N = newSDNode<SrcValueSDNode>(V);
2319 CSEMap.InsertNode(
N, IP);
2330 if (
SDNode *E = FindNodeOrInsertPos(
ID, IP))
2333 auto *
N = newSDNode<MDNodeSDNode>(MD);
2334 CSEMap.InsertNode(
N, IP);
2340 if (VT == V.getValueType())
2347 unsigned SrcAS,
unsigned DestAS) {
2351 ID.AddInteger(SrcAS);
2352 ID.AddInteger(DestAS);
2355 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
2360 createOperands(
N, Ops);
2362 CSEMap.InsertNode(
N, IP);
2374 EVT OpTy =
Op.getValueType();
2376 if (OpTy == ShTy || OpTy.
isVector())
return Op;
2384 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2385 EVT VT = Node->getValueType(0);
2386 SDValue Tmp1 = Node->getOperand(0);
2387 SDValue Tmp2 = Node->getOperand(1);
2388 const MaybeAlign MA(Node->getConstantOperandVal(3));
2420 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2421 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2432 Align RedAlign = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2442 if (RedAlign > StackAlign) {
2445 unsigned NumIntermediates;
2447 NumIntermediates, RegisterVT);
2449 Align RedAlign2 = UseABI ?
DL.getABITypeAlign(Ty) :
DL.getPrefTypeAlign(Ty);
2450 if (RedAlign2 < RedAlign)
2451 RedAlign = RedAlign2;
2466 false,
nullptr, StackID);
2481 "Don't know how to choose the maximum size when creating a stack "
2490 Align Align = std::max(
DL.getPrefTypeAlign(Ty1),
DL.getPrefTypeAlign(Ty2));
2498 auto GetUndefBooleanConstant = [&]() {
2537 return GetUndefBooleanConstant();
2542 return GetUndefBooleanConstant();
2551 const APInt &C2 = N2C->getAPIntValue();
2553 const APInt &C1 = N1C->getAPIntValue();
2560 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2561 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2563 if (N1CFP && N2CFP) {
2568 return GetUndefBooleanConstant();
2573 return GetUndefBooleanConstant();
2579 return GetUndefBooleanConstant();
2584 return GetUndefBooleanConstant();
2589 return GetUndefBooleanConstant();
2595 return GetUndefBooleanConstant();
2624 return getSetCC(dl, VT, N2, N1, SwappedCond);
2625 }
else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2640 return GetUndefBooleanConstant();
2651 unsigned BitWidth =
Op.getScalarValueSizeInBits();
2659 unsigned Depth)
const {
2667 const APInt &DemandedElts,
2668 unsigned Depth)
const {
2675 unsigned Depth )
const {
2681 unsigned Depth)
const {
2686 const APInt &DemandedElts,
2687 unsigned Depth)
const {
2688 EVT VT =
Op.getValueType();
2695 for (
unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
2696 if (!DemandedElts[EltIdx])
2700 KnownZeroElements.
setBit(EltIdx);
2702 return KnownZeroElements;
2712 unsigned Opcode = V.getOpcode();
2713 EVT VT = V.getValueType();
2716 "scalable demanded bits are ignored");
2728 UndefElts = V.getOperand(0).isUndef()
2737 APInt UndefLHS, UndefRHS;
2742 UndefElts = UndefLHS | UndefRHS;
2772 for (
unsigned i = 0; i != NumElts; ++i) {
2778 if (!DemandedElts[i])
2780 if (Scl && Scl !=
Op)
2790 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2791 for (
int i = 0; i != (int)NumElts; ++i) {
2797 if (!DemandedElts[i])
2799 if (M < (
int)NumElts)
2802 DemandedRHS.
setBit(M - NumElts);
2814 auto CheckSplatSrc = [&](
SDValue Src,
const APInt &SrcElts) {
2816 return (SrcElts.popcount() == 1) ||
2818 (SrcElts & SrcUndefs).
isZero());
2820 if (!DemandedLHS.
isZero())
2821 return CheckSplatSrc(V.getOperand(0), DemandedLHS);
2822 return CheckSplatSrc(V.getOperand(1), DemandedRHS);
2826 SDValue Src = V.getOperand(0);
2828 if (Src.getValueType().isScalableVector())
2831 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2844 SDValue Src = V.getOperand(0);
2846 if (Src.getValueType().isScalableVector())
2848 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2850 APInt DemandedSrcElts = DemandedElts.
zext(NumSrcElts);
2852 UndefElts = UndefSrcElts.
trunc(NumElts);
2858 SDValue Src = V.getOperand(0);
2859 EVT SrcVT = Src.getValueType();
2869 if ((
BitWidth % SrcBitWidth) == 0) {
2871 unsigned Scale =
BitWidth / SrcBitWidth;
2873 APInt ScaledDemandedElts =
2875 for (
unsigned I = 0;
I != Scale; ++
I) {
2879 SubDemandedElts &= ScaledDemandedElts;
2883 if (!SubUndefElts.
isZero())
2897 EVT VT = V.getValueType();
2907 (AllowUndefs || !UndefElts);
2913 EVT VT = V.getValueType();
2914 unsigned Opcode = V.getOpcode();
2935 SplatIdx = (UndefElts & DemandedElts).
countr_one();
2949 auto *SVN = cast<ShuffleVectorSDNode>(V);
2950 if (!SVN->isSplat())
2952 int Idx = SVN->getSplatIndex();
2953 int NumElts = V.getValueType().getVectorNumElements();
2954 SplatIdx =
Idx % NumElts;
2955 return V.getOperand(
Idx / NumElts);
2971 if (LegalSVT.
bitsLT(SVT))
2982 const APInt &DemandedElts)
const {
2985 "Unknown shift node");
2986 unsigned BitWidth = V.getScalarValueSizeInBits();
2989 const APInt &ShAmt = SA->getAPIntValue();
3000 "Unknown shift node");
3003 unsigned BitWidth = V.getScalarValueSizeInBits();
3004 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
3007 const APInt *MinShAmt =
nullptr;
3008 for (
unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3009 if (!DemandedElts[i])
3011 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3015 const APInt &ShAmt = SA->getAPIntValue();
3018 if (MinShAmt && MinShAmt->
ule(ShAmt))
3029 "Unknown shift node");
3032 unsigned BitWidth = V.getScalarValueSizeInBits();
3033 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
3036 const APInt *MaxShAmt =
nullptr;
3037 for (
unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3038 if (!DemandedElts[i])
3040 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3044 const APInt &ShAmt = SA->getAPIntValue();
3047 if (MaxShAmt && MaxShAmt->
uge(ShAmt))
3058 EVT VT =
Op.getValueType();
3073 unsigned Depth)
const {
3074 unsigned BitWidth =
Op.getScalarValueSizeInBits();
3078 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op)) {
3082 if (
auto *
C = dyn_cast<ConstantFPSDNode>(
Op)) {
3092 assert((!
Op.getValueType().isFixedLengthVector() ||
3093 NumElts ==
Op.getValueType().getVectorNumElements()) &&
3094 "Unexpected vector size");
3099 unsigned Opcode =
Op.getOpcode();
3107 "Expected SPLAT_VECTOR implicit truncation");
3114 unsigned ScalarSize =
Op.getOperand(0).getScalarValueSizeInBits();
3116 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3123 const APInt &Step =
Op.getConstantOperandAPInt(0);
3132 const APInt MinNumElts =
3138 .
umul_ov(MinNumElts, Overflow);
3142 const APInt MaxValue = (MaxNumElts - 1).
umul_ov(Step, Overflow);
3150 assert(!
Op.getValueType().isScalableVector());
3154 if (!DemandedElts[i])
3163 "Expected BUILD_VECTOR implicit truncation");
3176 assert(!
Op.getValueType().isScalableVector());
3179 APInt DemandedLHS, DemandedRHS;
3183 DemandedLHS, DemandedRHS))
3188 if (!!DemandedLHS) {
3196 if (!!DemandedRHS) {
3205 const APInt &Multiplier =
Op.getConstantOperandAPInt(0);
3210 if (
Op.getValueType().isScalableVector())
3214 EVT SubVectorVT =
Op.getOperand(0).getValueType();
3217 for (
unsigned i = 0; i != NumSubVectors; ++i) {
3219 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
3220 if (!!DemandedSub) {
3232 if (
Op.getValueType().isScalableVector())
3241 APInt DemandedSrcElts = DemandedElts;
3246 if (!!DemandedSubElts) {
3251 if (!!DemandedSrcElts) {
3261 if (
Op.getValueType().isScalableVector() || Src.getValueType().isScalableVector())
3264 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3270 if (
Op.getValueType().isScalableVector())
3274 if (DemandedElts != 1)
3285 if (
Op.getValueType().isScalableVector())
3305 if ((
BitWidth % SubBitWidth) == 0) {
3312 unsigned SubScale =
BitWidth / SubBitWidth;
3313 APInt SubDemandedElts(NumElts * SubScale, 0);
3314 for (
unsigned i = 0; i != NumElts; ++i)
3315 if (DemandedElts[i])
3316 SubDemandedElts.
setBit(i * SubScale);
3318 for (
unsigned i = 0; i != SubScale; ++i) {
3321 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3322 Known.
insertBits(Known2, SubBitWidth * Shifts);
3327 if ((SubBitWidth %
BitWidth) == 0) {
3328 assert(
Op.getValueType().isVector() &&
"Expected bitcast to vector");
3333 unsigned SubScale = SubBitWidth /
BitWidth;
3334 APInt SubDemandedElts =
3339 for (
unsigned i = 0; i != NumElts; ++i)
3340 if (DemandedElts[i]) {
3341 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3372 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3376 Op.getOperand(0), DemandedElts,
false,
Depth + 1);
3382 if (
Op->getFlags().hasNoSignedWrap() &&
3383 Op.getOperand(0) ==
Op.getOperand(1) &&
3401 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3404 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3405 if (
Op.getResNo() == 0)
3412 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3415 bool SelfMultiply =
Op.getOperand(0) ==
Op.getOperand(1);
3416 if (
Op.getResNo() == 0)
3455 if (
Op.getResNo() != 1)
3470 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
3484 if (
const APInt *ShMinAmt =
3492 Op->getFlags().hasExact());
3495 if (
const APInt *ShMinAmt =
3503 Op->getFlags().hasExact());
3508 unsigned Amt =
C->getAPIntValue().urem(
BitWidth);
3514 DemandedElts,
Depth + 1);
3539 assert((
Op.getResNo() == 0 ||
Op.getResNo() == 1) &&
"Unknown result");
3542 unsigned LoBits =
Op.getOperand(0).getScalarValueSizeInBits();
3543 unsigned HiBits =
Op.getOperand(1).getScalarValueSizeInBits();
3546 Known = Known2.
concat(Known);
3560 if (
Op.getResNo() == 0)
3568 EVT EVT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
3609 !
Op.getValueType().isScalableVector()) {
3623 for (
unsigned i = 0; i != NumElts; ++i) {
3624 if (!DemandedElts[i])
3627 if (
auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3633 if (
auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3634 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3645 if (
auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3647 }
else if (
auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3653 }
else if (
Op.getResNo() == 0) {
3654 KnownBits Known0(!LD->getMemoryVT().isScalableVT()
3655 ? LD->getMemoryVT().getFixedSizeInBits()
3657 EVT VT =
Op.getValueType();
3664 if (
const MDNode *MD = LD->getRanges()) {
3675 if (LD->getMemoryVT().isVector())
3676 Known0 = Known0.
trunc(LD->getMemoryVT().getScalarSizeInBits());
3693 if (
Op.getValueType().isScalableVector())
3695 EVT InVT =
Op.getOperand(0).getValueType();
3707 if (
Op.getValueType().isScalableVector())
3709 EVT InVT =
Op.getOperand(0).getValueType();
3725 if (
Op.getValueType().isScalableVector())
3727 EVT InVT =
Op.getOperand(0).getValueType();
3744 EVT VT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
3747 Known.
Zero |= (~InMask);
3748 Known.
One &= (~Known.Zero);
3752 unsigned LogOfAlign =
Log2(cast<AssertAlignSDNode>(
Op)->
getAlign());
3772 Op.getOpcode() ==
ISD::ADD, Flags.hasNoSignedWrap(),
3773 Flags.hasNoUnsignedWrap(), Known, Known2);
3780 if (
Op.getResNo() == 1) {
3791 "We only compute knownbits for the difference here.");
3798 Borrow = Borrow.
trunc(1);
3812 if (
Op.getResNo() == 1) {
3823 assert(
Op.getResNo() == 0 &&
"We only compute knownbits for the sum here.");
3833 Carry = Carry.
trunc(1);
3869 const unsigned Index =
Op.getConstantOperandVal(1);
3870 const unsigned EltBitWidth =
Op.getValueSizeInBits();
3877 Known = Known.
trunc(EltBitWidth);
3893 Known = Known.
trunc(EltBitWidth);
3898 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3899 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3909 if (
Op.getValueType().isScalableVector())
3918 bool DemandedVal =
true;
3919 APInt DemandedVecElts = DemandedElts;
3920 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3921 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3922 unsigned EltIdx = CEltNo->getZExtValue();
3923 DemandedVal = !!DemandedElts[EltIdx];
3932 if (!!DemandedVecElts) {
3950 Known = Known2.
abs();
3981 if (CstLow && CstHigh) {
3986 const APInt &ValueHigh = CstHigh->getAPIntValue();
3987 if (ValueLow.
sle(ValueHigh)) {
3990 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4013 if (IsMax && CstLow) {
4037 EVT VT = cast<VTSDNode>(
Op.getOperand(1))->getVT();
4042 if (
Op.getResNo() == 1) {
4069 cast<AtomicSDNode>(
Op)->getMemoryVT().getScalarSizeInBits();
4071 if (
Op.getResNo() == 0) {
4095 if (
Op.getValueType().isScalableVector())
4242 return C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2();
4250 if (
C &&
C->getAPIntValue() == 1)
4260 if (
C &&
C->getAPIntValue().isSignMask())
4272 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
4273 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
4281 if (
C->getAPIntValue().zextOrTrunc(
BitWidth).isPowerOf2())
4318 EVT VT =
Op.getValueType();
4330 unsigned Depth)
const {
4331 EVT VT =
Op.getValueType();
4336 unsigned FirstAnswer = 1;
4338 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op)) {
4339 const APInt &Val =
C->getAPIntValue();
4349 unsigned Opcode =
Op.getOpcode();
4353 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getSizeInBits();
4354 return VTBits-Tmp+1;
4356 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getSizeInBits();
4363 unsigned NumSrcBits =
Op.getOperand(0).getValueSizeInBits();
4365 if (NumSrcSignBits > (NumSrcBits - VTBits))
4366 return NumSrcSignBits - (NumSrcBits - VTBits);
4373 if (!DemandedElts[i])
4380 APInt T =
C->getAPIntValue().trunc(VTBits);
4381 Tmp2 =
T.getNumSignBits();
4385 if (
SrcOp.getValueSizeInBits() != VTBits) {
4387 "Expected BUILD_VECTOR implicit truncation");
4388 unsigned ExtraBits =
SrcOp.getValueSizeInBits() - VTBits;
4389 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4392 Tmp = std::min(Tmp, Tmp2);
4399 APInt DemandedLHS, DemandedRHS;
4403 DemandedLHS, DemandedRHS))
4406 Tmp = std::numeric_limits<unsigned>::max();
4409 if (!!DemandedRHS) {
4411 Tmp = std::min(Tmp, Tmp2);
4416 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4432 if (VTBits == SrcBits)
4438 if ((SrcBits % VTBits) == 0) {
4441 unsigned Scale = SrcBits / VTBits;
4442 APInt SrcDemandedElts =
4452 for (
unsigned i = 0; i != NumElts; ++i)
4453 if (DemandedElts[i]) {
4454 unsigned SubOffset = i % Scale;
4455 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
4456 SubOffset = SubOffset * VTBits;
4457 if (Tmp <= SubOffset)
4459 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
4468 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getScalarSizeInBits();
4469 return VTBits - Tmp + 1;
4471 Tmp = VTBits -
Op.getOperand(0).getScalarValueSizeInBits();
4475 Tmp = cast<VTSDNode>(
Op.getOperand(1))->getVT().getScalarSizeInBits();
4478 return std::max(Tmp, Tmp2);
4483 EVT SrcVT = Src.getValueType();
4491 if (
const APInt *ShAmt =
4493 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
4496 if (
const APInt *ShAmt =
4500 if (ShAmt->ult(Tmp))
4501 return Tmp - ShAmt->getZExtValue();
4511 FirstAnswer = std::min(Tmp, Tmp2);
4521 if (Tmp == 1)
return 1;
4523 return std::min(Tmp, Tmp2);
4526 if (Tmp == 1)
return 1;
4528 return std::min(Tmp, Tmp2);
4540 if (CstLow && CstHigh) {
4545 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
4546 return std::min(Tmp, Tmp2);
4555 return std::min(Tmp, Tmp2);
4563 return std::min(Tmp, Tmp2);
4574 if (
Op.getResNo() != 1)
4588 unsigned OpNo =
Op->isStrictFPOpcode() ? 1 : 0;
4605 unsigned RotAmt =
C->getAPIntValue().urem(VTBits);
4609 RotAmt = (VTBits - RotAmt) % VTBits;
4613 if (Tmp > (RotAmt + 1))
return (Tmp - RotAmt);
4621 if (Tmp == 1)
return 1;
4626 if (CRHS->isAllOnes()) {
4632 if ((Known.
Zero | 1).isAllOnes())
4642 if (Tmp2 == 1)
return 1;
4643 return std::min(Tmp, Tmp2) - 1;
4646 if (Tmp2 == 1)
return 1;
4651 if (CLHS->isZero()) {
4656 if ((Known.
Zero | 1).isAllOnes())
4670 if (Tmp == 1)
return 1;
4671 return std::min(Tmp, Tmp2) - 1;
4675 if (SignBitsOp0 == 1)
4678 if (SignBitsOp1 == 1)
4680 unsigned OutValidBits =
4681 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
4682 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
4692 unsigned NumSrcBits =
Op.getOperand(0).getScalarValueSizeInBits();
4694 if (NumSrcSignBits > (NumSrcBits - VTBits))
4695 return NumSrcSignBits - (NumSrcBits - VTBits);
4702 const int BitWidth =
Op.getValueSizeInBits();
4703 const int Items =
Op.getOperand(0).getValueSizeInBits() /
BitWidth;
4707 const int rIndex = Items - 1 -
Op.getConstantOperandVal(1);
4722 bool DemandedVal =
true;
4723 APInt DemandedVecElts = DemandedElts;
4724 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4725 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4726 unsigned EltIdx = CEltNo->getZExtValue();
4727 DemandedVal = !!DemandedElts[EltIdx];
4730 Tmp = std::numeric_limits<unsigned>::max();
4736 Tmp = std::min(Tmp, Tmp2);
4738 if (!!DemandedVecElts) {
4740 Tmp = std::min(Tmp, Tmp2);
4742 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4753 const unsigned BitWidth =
Op.getValueSizeInBits();
4754 const unsigned EltBitWidth =
Op.getOperand(0).getScalarValueSizeInBits();
4766 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4767 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4777 if (Src.getValueType().isScalableVector())
4780 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
4789 Tmp = std::numeric_limits<unsigned>::max();
4790 EVT SubVectorVT =
Op.getOperand(0).getValueType();
4793 for (
unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
4795 DemandedElts.
extractBits(NumSubVectorElts, i * NumSubVectorElts);
4799 Tmp = std::min(Tmp, Tmp2);
4801 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4814 APInt DemandedSrcElts = DemandedElts;
4817 Tmp = std::numeric_limits<unsigned>::max();
4818 if (!!DemandedSubElts) {
4823 if (!!DemandedSrcElts) {
4825 Tmp = std::min(Tmp, Tmp2);
4827 assert(Tmp <= VTBits &&
"Failed to determine minimum sign bits");
4832 if (
const MDNode *Ranges = LD->getRanges()) {
4833 if (DemandedElts != 1)
4838 switch (LD->getExtensionType()) {
4873 Tmp = cast<AtomicSDNode>(
Op)->getMemoryVT().getScalarSizeInBits();
4875 if (
Op.getResNo() == 0) {
4879 return VTBits - Tmp + 1;
4881 return VTBits - Tmp;
4885 return VTBits - Tmp + 1;
4887 return VTBits - Tmp;
4895 if (
Op.getResNo() == 0) {
4898 unsigned ExtType = LD->getExtensionType();
4902 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4903 return VTBits - Tmp + 1;
4905 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4906 return VTBits - Tmp;
4911 Type *CstTy = Cst->getType();
4916 for (
unsigned i = 0; i != NumElts; ++i) {
4917 if (!DemandedElts[i])
4920 if (
auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4922 Tmp = std::min(Tmp,
Value.getNumSignBits());
4925 if (
auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4926 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4927 Tmp = std::min(Tmp,
Value.getNumSignBits());
4953 FirstAnswer = std::max(FirstAnswer, NumBits);
4964 unsigned Depth)
const {
4966 return Op.getScalarValueSizeInBits() - SignBits + 1;
4970 const APInt &DemandedElts,
4971 unsigned Depth)
const {
4973 return Op.getScalarValueSizeInBits() - SignBits + 1;
4977 unsigned Depth)
const {
4983 EVT VT =
Op.getValueType();
4994 const APInt &DemandedElts,
4996 unsigned Depth)
const {
4997 unsigned Opcode =
Op.getOpcode();
5023 if (!DemandedElts[i])
5052 return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
5058 unsigned Depth)
const {
5060 EVT VT =
Op.getValueType();
5073 unsigned Depth)
const {
5075 EVT VT =
Op.getValueType();
5079 unsigned Opcode =
Op.getOpcode();
5106 if (
Op.getOperand(0).getValueType().isInteger())
5114 if (((
unsigned)CCCode & 0x10U))
5120 (
Op->getFlags().hasNoNaNs() ||
Op->getFlags().hasNoInfs()));
5125 return ConsiderFlags &&
Op->getFlags().hasNonNeg();
5131 return ConsiderFlags && (
Op->getFlags().hasNoSignedWrap() ||
5132 Op->getFlags().hasNoUnsignedWrap());
5140 return ConsiderFlags && (
Op->getFlags().hasNoSignedWrap() ||
5141 Op->getFlags().hasNoUnsignedWrap());
5145 return ConsiderFlags &&
Op->getFlags().hasDisjoint();
5149 EVT VecVT =
Op.getOperand(0).getValueType();
5168 unsigned Opcode =
Op.getOpcode();
5170 return Op->getFlags().hasDisjoint() ||
5179 !isa<ConstantSDNode>(
Op.getOperand(1)))
5199 return !
C->getValueAPF().isNaN() ||
5200 (SNaN && !
C->getValueAPF().isSignaling());
5203 unsigned Opcode =
Op.getOpcode();
5311 assert(
Op.getValueType().isFloatingPoint() &&
5312 "Floating point type expected");
5323 assert(!
Op.getValueType().isFloatingPoint() &&
5324 "Floating point types unsupported - use isKnownNeverZeroFloat");
5333 switch (
Op.getOpcode()) {
5347 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
5351 if (ValKnown.
One[0])
5417 if (
Op->getFlags().hasExact())
5433 if (
Op->getFlags().hasExact())
5438 if (
Op->getFlags().hasNoUnsignedWrap())
5449 std::optional<bool> ne =
5456 if (
Op->getFlags().hasNoSignedWrap() ||
Op->getFlags().hasNoUnsignedWrap())
5472 if (
A ==
B)
return true;
5477 if (CA->isZero() && CB->isZero())
return true;
5486 return V.getOperand(0);
5493 SDValue ExtArg = V.getOperand(0);
5512 NotOperand = NotOperand->getOperand(0);
5514 if (
Other == NotOperand)
5517 return NotOperand ==
Other->getOperand(0) ||
5518 NotOperand ==
Other->getOperand(1);
5524 A =
A->getOperand(0);
5527 B =
B->getOperand(0);
5530 return MatchNoCommonBitsPattern(
A->getOperand(0),
A->getOperand(1),
B) ||
5531 MatchNoCommonBitsPattern(
A->getOperand(1),
A->getOperand(0),
B);
5537 assert(
A.getValueType() ==
B.getValueType() &&
5538 "Values must have the same type");
5548 if (cast<ConstantSDNode>(Step)->
isZero())
5557 int NumOps = Ops.
size();
5558 assert(NumOps != 0 &&
"Can't build an empty vector!");
5560 "BUILD_VECTOR cannot be used with scalable types");
5562 "Incorrect element count in BUILD_VECTOR!");
5570 bool IsIdentity =
true;
5571 for (
int i = 0; i != NumOps; ++i) {
5573 Ops[i].getOperand(0).getValueType() != VT ||
5574 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
5575 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
5576 Ops[i].getConstantOperandAPInt(1) != i) {
5580 IdentitySrc = Ops[i].getOperand(0);
5593 assert(!Ops.
empty() &&
"Can't concatenate an empty list of vectors!");
5596 return Ops[0].getValueType() ==
Op.getValueType();
5598 "Concatenation of vectors with inconsistent value types!");
5599 assert((Ops[0].getValueType().getVectorElementCount() * Ops.
size()) ==
5601 "Incorrect element count in vector concatenation!");
5603 if (Ops.
size() == 1)
5614 bool IsIdentity =
true;
5615 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
5617 unsigned IdentityIndex = i *
Op.getValueType().getVectorMinNumElements();
5619 Op.getOperand(0).getValueType() != VT ||
5620 (IdentitySrc &&
Op.getOperand(0) != IdentitySrc) ||
5621 Op.getConstantOperandVal(1) != IdentityIndex) {
5625 assert((!IdentitySrc || IdentitySrc ==
Op.getOperand(0)) &&
5626 "Unexpected identity source vector for concat of extracts");
5627 IdentitySrc =
Op.getOperand(0);
5630 assert(IdentitySrc &&
"Failed to set source vector of extracts");
5645 EVT OpVT =
Op.getValueType();
5657 SVT = (SVT.
bitsLT(
Op.getValueType()) ?
Op.getValueType() : SVT);
5680 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
5683 auto *
N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(),
5685 CSEMap.InsertNode(
N, IP);
5698 return getNode(Opcode,
DL, VT, N1, Flags);
5749 "STEP_VECTOR can only be used with scalable types");
5752 "Unexpected step operand");
5774 "Invalid FP cast!");
5778 "Vector element count mismatch!");
5796 "Invalid SIGN_EXTEND!");
5798 "SIGN_EXTEND result type type should be vector iff the operand "
5803 "Vector element count mismatch!");
5817 "Invalid ZERO_EXTEND!");
5819 "ZERO_EXTEND result type type should be vector iff the operand "
5824 "Vector element count mismatch!");
5855 "Invalid ANY_EXTEND!");
5857 "ANY_EXTEND result type type should be vector iff the operand "
5862 "Vector element count mismatch!");
5887 "Invalid TRUNCATE!");
5889 "TRUNCATE result type type should be vector iff the operand "
5894 "Vector element count mismatch!");
5917 assert(VT.
isVector() &&
"This DAG node is restricted to vector types.");
5919 "The input must be the same size or smaller than the result.");
5922 "The destination vector type must have fewer lanes than the input.");
5932 "BSWAP types must be a multiple of 16 bits!");
5946 "Cannot BITCAST between types of different sizes!");
5959 "Illegal SCALAR_TO_VECTOR node!");
6012 if (VT != MVT::Glue) {
6016 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
6017 E->intersectFlagsWith(Flags);
6021 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6023 createOperands(
N, Ops);
6024 CSEMap.InsertNode(
N, IP);
6026 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
6027 createOperands(
N, Ops);
6061 if (!C2.getBoolValue())
6065 if (!C2.getBoolValue())
6069 if (!C2.getBoolValue())
6073 if (!C2.getBoolValue())
6101 return std::nullopt;
6107 bool IsUndef1,
const APInt &C2,
6109 if (!(IsUndef1 || IsUndef2))
6117 return std::nullopt;
6127 auto *C2 = dyn_cast<ConstantSDNode>(N2);
6130 int64_t
Offset = C2->getSExtValue();
6148 assert(Ops.
size() == 2 &&
"Div/rem should have 2 operands");
6155 [](
SDValue V) { return V.isUndef() ||
6156 isNullConstant(V); });
6176 unsigned NumOps = Ops.
size();
6192 if (
auto *
C = dyn_cast<ConstantSDNode>(N1)) {
6193 const APInt &Val =
C->getAPIntValue();
6197 C->isTargetOpcode(),
C->isOpaque());
6204 C->isTargetOpcode(),
C->isOpaque());
6209 C->isTargetOpcode(),
C->isOpaque());
6211 C->isTargetOpcode(),
C->isOpaque());
6258 if (VT == MVT::f16 &&
C->getValueType(0) == MVT::i16)
6260 if (VT == MVT::f32 &&
C->getValueType(0) == MVT::i32)
6262 if (VT == MVT::f64 &&
C->getValueType(0) == MVT::i64)
6264 if (VT == MVT::f128 &&
C->getValueType(0) == MVT::i128)
6271 if (
auto *
C = dyn_cast<ConstantFPSDNode>(N1)) {
6325 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
6328 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::f16)
6331 if (VT == MVT::i16 &&
C->getValueType(0) == MVT::bf16)
6334 if (VT == MVT::i32 &&
C->getValueType(0) == MVT::f32)
6337 if (VT == MVT::i64 &&
C->getValueType(0) == MVT::f64)
6338 return getConstant(V.bitcastToAPInt().getZExtValue(),
DL, VT);
6353 if (
auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
6354 if (
auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
6355 if (C1->isOpaque() || C2->isOpaque())
6358 std::optional<APInt> FoldAttempt =
6359 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
6365 "Can't fold vectors ops with scalar operands");
6386 Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
6391 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
6392 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
6399 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
6400 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
6404 Opcode, RawBits1[
I], UndefElts1[
I], RawBits2[
I], UndefElts2[
I]);
6415 DstBits, RawBits, DstUndefs,
6417 EVT BVEltVT = BV1->getOperand(0).getValueType();
6420 for (
unsigned I = 0, E = DstBits.
size();
I != E; ++
I) {
6438 ? Ops[0].getConstantOperandAPInt(0) * RHSVal
6439 : Ops[0].getConstantOperandAPInt(0) << RHSVal;
6444 auto IsScalarOrSameVectorSize = [NumElts](
const SDValue &
Op) {
6445 return !
Op.getValueType().isVector() ||
6446 Op.getValueType().getVectorElementCount() == NumElts;
6449 auto IsBuildVectorSplatVectorOrUndef = [](
const SDValue &
Op) {
6458 if (!
llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
6487 for (
unsigned I = 0;
I != NumVectorElts;
I++) {
6490 EVT InSVT =
Op.getValueType().getScalarType();
6512 !isa<ConstantSDNode>(ScalarOp) &&
6526 if (LegalSVT != SVT)
6527 ScalarResult =
getNode(ExtendCode,
DL, LegalSVT, ScalarResult);
6545 if (Ops.
size() != 2)
6556 if (N1CFP && N2CFP) {
6603 if (N1C && N1C->getValueAPF().isNegZero() && N2.
isUndef())
6631 ID.AddInteger(
A.value());
6634 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
6637 auto *
N = newSDNode<AssertAlignSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
6639 createOperands(
N, {Val});
6641 CSEMap.InsertNode(
N, IP);
6654 return getNode(Opcode,
DL, VT, N1, N2, Flags);
6668 if ((N1C && !N2C) || (N1CFP && !N2CFP))
6682 "Operand is DELETED_NODE!");
6686 auto *N1C = dyn_cast<ConstantSDNode>(N1);
6687 auto *N2C = dyn_cast<ConstantSDNode>(N2);
6698 N2.
getValueType() == MVT::Other &&
"Invalid token factor!");
6702 if (N1 == N2)
return N1;
6718 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6720 N1.
getValueType() == VT &&
"Binary operator types must match!");
6723 if (N2CV && N2CV->
isZero())
6732 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6734 N1.
getValueType() == VT &&
"Binary operator types must match!");
6737 if (N2CV && N2CV->
isZero())
6744 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6746 N1.
getValueType() == VT &&
"Binary operator types must match!");
6751 const APInt &N2CImm = N2C->getAPIntValue();
6765 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6767 N1.
getValueType() == VT &&
"Binary operator types must match!");
6779 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6781 N1.
getValueType() == VT &&
"Binary operator types must match!");
6785 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6787 N1.
getValueType() == VT &&
"Binary operator types must match!");
6793 assert(VT.
isInteger() &&
"This operator does not apply to FP types!");
6795 N1.
getValueType() == VT &&
"Binary operator types must match!");
6806 N1.
getValueType() == VT &&
"Binary operator types must match!");
6814 "Invalid FCOPYSIGN!");
6819 const APInt &ShiftImm = N2C->getAPIntValue();
6831 "Shift operators return type must be the same as their first arg");
6833 "Shifts only work on integers");
6835 "Vector shift amounts must be in the same as their first arg");
6842 "Invalid use of small shift amount with oversized value!");
6849 if (N2CV && N2CV->
isZero())
6856 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
6857 "Invalid FP_ROUND!");
6862 EVT EVT = cast<VTSDNode>(N2)->getVT();
6865 "Cannot *_EXTEND_INREG FP types");
6867 "AssertSExt/AssertZExt type should be the vector element type "
6868 "rather than the vector type!");
6874 EVT EVT = cast<VTSDNode>(N2)->getVT();
6877 "Cannot *_EXTEND_INREG FP types");
6879 "SIGN_EXTEND_INREG type should be vector iff the operand "
6883 "Vector element counts must match in SIGN_EXTEND_INREG");
6885 if (
EVT == VT)
return N1;
6895 const APInt &Val = N1C->getAPIntValue();
6896 return SignExtendInReg(Val, VT);
6909 APInt Val =
C->getAPIntValue();
6910 Ops.
push_back(SignExtendInReg(Val, OpVT));
6928 "FP_TO_*INT_SAT type should be vector iff the operand type is "
6932 "Vector element counts must match in FP_TO_*INT_SAT");
6933 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
6934 "Type to saturate to must be a scalar.");
6941 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
6942 element type of the vector.");
6974 "BUILD_VECTOR used for scalable vectors");
6997 if (N1Op2C && N2C) {
7027 assert(N2C && (
unsigned)N2C->getZExtValue() < 2 &&
"Bad EXTRACT_ELEMENT!");
7031 "Wrong types for EXTRACT_ELEMENT!");
7042 unsigned Shift = ElementSize * N2C->getZExtValue();
7043 const APInt &Val = N1C->getAPIntValue();
7050 "Extract subvector VTs must be vectors!");
7052 "Extract subvector VTs must have the same element type!");
7054 "Cannot extract a scalable vector from a fixed length vector!");
7057 "Extract subvector must be from larger vector to smaller vector!");
7058 assert(N2C &&
"Extract subvector index must be a constant");
7062 "Extract subvector overflow!");
7063 assert(N2C->getAPIntValue().getBitWidth() ==
7065 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
7080 return N1.
getOperand(N2C->getZExtValue() / Factor);
7148 if (VT != MVT::Glue) {
7152 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
7153 E->intersectFlagsWith(Flags);
7157 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7159 createOperands(
N, Ops);
7160 CSEMap.InsertNode(
N, IP);
7162 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7163 createOperands(
N, Ops);
7177 return getNode(Opcode,
DL, VT, N1, N2, N3, Flags);
7186 "Operand is DELETED_NODE!");
7197 if (N1CFP && N2CFP && N3CFP) {
7226 "SETCC operands must have the same type!");
7228 "SETCC type should be vector iff the operand type is vector!");
7231 "SETCC vector element counts must match!");
7251 if (cast<ConstantSDNode>(N3)->
isZero())
7281 "Dest and insert subvector source types must match!");
7283 "Insert subvector VTs must be vectors!");
7285 "Insert subvector VTs must have the same element type!");
7287 "Cannot insert a scalable vector into a fixed length vector!");
7290 "Insert subvector must be from smaller vector to larger vector!");
7291 assert(isa<ConstantSDNode>(N3) &&
7292 "Insert subvector index must be constant");
7296 "Insert subvector overflow!");
7299 "Constant index for INSERT_SUBVECTOR has an invalid size");
7317 case ISD::VP_TRUNCATE:
7318 case ISD::VP_SIGN_EXTEND:
7319 case ISD::VP_ZERO_EXTEND:
7330 if (VT != MVT::Glue) {
7334 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
7335 E->intersectFlagsWith(Flags);
7339 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7341 createOperands(
N, Ops);
7342 CSEMap.InsertNode(
N, IP);
7344 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
7345 createOperands(
N, Ops);
7356 SDValue Ops[] = { N1, N2, N3, N4 };
7363 SDValue Ops[] = { N1, N2, N3, N4, N5 };
7381 if (FI->getIndex() < 0)
7396 assert(
C->getAPIntValue().getBitWidth() == 8);
7401 return DAG.
getConstant(Val, dl, VT,
false, IsOpaque);
7407 assert(
Value.getValueType() == MVT::i8 &&
"memset with non-byte fill value?");
7423 if (VT !=
Value.getValueType())
7436 if (Slice.
Array ==
nullptr) {
7439 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
7454 unsigned NumVTBytes = NumVTBits / 8;
7455 unsigned NumBytes = std::min(NumVTBytes,
unsigned(Slice.
Length));
7457 APInt Val(NumVTBits, 0);
7459 for (
unsigned i = 0; i != NumBytes; ++i)
7462 for (
unsigned i = 0; i != NumBytes; ++i)
7463 Val |= (
uint64_t)(
unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
7482 APInt(
Base.getValueSizeInBits().getFixedValue(),
7483 Offset.getKnownMinValue()));
7494 EVT BasePtrVT =
Ptr.getValueType();
7503 G = cast<GlobalAddressSDNode>(Src);
7504 else if (Src.getOpcode() ==
ISD::ADD &&
7507 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
7508 SrcDelta = Src.getConstantOperandVal(1);
7514 SrcDelta +
G->getOffset());
7530 assert(OutLoadChains.
size() &&
"Missing loads in memcpy inlining");
7531 assert(OutStoreChains.
size() &&
"Missing stores in memcpy inlining");
7533 for (
unsigned i =
From; i < To; ++i) {
7535 GluedLoadChains.
push_back(OutLoadChains[i]);
7542 for (
unsigned i =
From; i < To; ++i) {
7543 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
7545 ST->getBasePtr(), ST->getMemoryVT(),
7546 ST->getMemOperand());
7554 bool isVol,
bool AlwaysInline,
7570 std::vector<EVT> MemOps;
7571 bool DstAlignCanChange =
false;
7577 DstAlignCanChange =
true;
7579 if (!SrcAlign || Alignment > *SrcAlign)
7580 SrcAlign = Alignment;
7581 assert(SrcAlign &&
"SrcAlign must be set");
7585 bool isZeroConstant = CopyFromConstant && Slice.
Array ==
nullptr;
7587 const MemOp Op = isZeroConstant
7591 *SrcAlign, isVol, CopyFromConstant);
7597 if (DstAlignCanChange) {
7598 Type *Ty = MemOps[0].getTypeForEVT(
C);
7599 Align NewAlign =
DL.getABITypeAlign(Ty);
7605 if (!
TRI->hasStackRealignment(MF))
7606 while (NewAlign > Alignment &&
DL.exceedsNaturalStackAlignment(NewAlign))
7609 if (NewAlign > Alignment) {
7613 Alignment = NewAlign;
7621 const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.
V);
7631 unsigned NumMemOps = MemOps.
size();
7633 for (
unsigned i = 0; i != NumMemOps; ++i) {
7638 if (VTSize >
Size) {
7641 assert(i == NumMemOps-1 && i != 0);
7642 SrcOff -= VTSize -
Size;
7643 DstOff -= VTSize -
Size;
7646 if (CopyFromConstant &&
7654 if (SrcOff < Slice.
Length) {
7656 SubSlice.
move(SrcOff);
7659 SubSlice.
Array =
nullptr;
7661 SubSlice.
Length = VTSize;
7664 if (
Value.getNode()) {
7668 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
7673 if (!Store.getNode()) {
7682 bool isDereferenceable =
7685 if (isDereferenceable)
7700 DstPtrInfo.
getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
7710 unsigned NumLdStInMemcpy = OutStoreChains.
size();
7712 if (NumLdStInMemcpy) {
7718 for (
unsigned i = 0; i < NumLdStInMemcpy; ++i) {
7724 if (NumLdStInMemcpy <= GluedLdStLimit) {
7726 NumLdStInMemcpy, OutLoadChains,
7729 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
7730 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
7731 unsigned GlueIter = 0;
7733 for (
unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
7734 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
7735 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
7738 OutLoadChains, OutStoreChains);
7739 GlueIter += GluedLdStLimit;
7743 if (RemainingLdStInMemcpy) {
7745 RemainingLdStInMemcpy, OutLoadChains,
7757 bool isVol,
bool AlwaysInline,
7771 std::vector<EVT> MemOps;
7772 bool DstAlignCanChange =
false;
7778 DstAlignCanChange =
true;
7780 if (!SrcAlign || Alignment > *SrcAlign)
7781 SrcAlign = Alignment;
7782 assert(SrcAlign &&
"SrcAlign must be set");
7792 if (DstAlignCanChange) {
7793 Type *Ty = MemOps[0].getTypeForEVT(
C);
7794 Align NewAlign =
DL.getABITypeAlign(Ty);
7800 if (!
TRI->hasStackRealignment(MF))
7801 while (NewAlign > Alignment &&
DL.exceedsNaturalStackAlignment(NewAlign))
7804 if (NewAlign > Alignment) {
7808 Alignment = NewAlign;
7822 unsigned NumMemOps = MemOps.
size();
7823 for (
unsigned i = 0; i < NumMemOps; i++) {
7828 bool isDereferenceable =
7831 if (isDereferenceable)
7837 SrcPtrInfo.
getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
7844 for (
unsigned i = 0; i < NumMemOps; i++) {
7850 Chain, dl, LoadValues[i],
7852 DstPtrInfo.
getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
7892 std::vector<EVT> MemOps;
7893 bool DstAlignCanChange =
false;
7899 DstAlignCanChange =
true;
7905 MemOp::Set(
Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
7909 if (DstAlignCanChange) {
7912 Align NewAlign =
DL.getABITypeAlign(Ty);
7918 if (!
TRI->hasStackRealignment(MF))
7919 while (NewAlign > Alignment &&
DL.exceedsNaturalStackAlignment(NewAlign))
7922 if (NewAlign > Alignment) {
7926 Alignment = NewAlign;
7932 unsigned NumMemOps = MemOps.size();
7935 EVT LargestVT = MemOps[0];
7936 for (
unsigned i = 1; i < NumMemOps; i++)
7937 if (MemOps[i].bitsGT(LargestVT))
7938 LargestVT = MemOps[i];
7945 for (
unsigned i = 0; i < NumMemOps; i++) {
7948 if (VTSize >
Size) {
7951 assert(i == NumMemOps-1 && i != 0);
7952 DstOff -= VTSize -
Size;
7959 if (VT.
bitsLT(LargestVT)) {
7980 assert(
Value.getValueType() == VT &&
"Value with wrong type.");
8007 bool isVol,
bool AlwaysInline,
bool isTailCall,
8016 if (ConstantSize->
isZero())
8020 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
8021 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo, AA);
8022 if (Result.getNode())
8030 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline,
8031 DstPtrInfo, SrcPtrInfo);
8032 if (Result.getNode())
8039 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
8041 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
8042 isVol,
true, DstPtrInfo, SrcPtrInfo, AAInfo, AA);
8058 Entry.Node = Dst; Args.push_back(Entry);
8059 Entry.Node = Src; Args.push_back(Entry);
8062 Entry.Node =
Size; Args.push_back(Entry);
8068 Dst.getValueType().getTypeForEVT(*
getContext()),
8075 std::pair<SDValue,SDValue> CallResult = TLI->
LowerCallTo(CLI);
8076 return CallResult.second;
8081 Type *SizeTy,
unsigned ElemSz,
8090 Args.push_back(Entry);
8093 Args.push_back(Entry);
8097 Args.push_back(Entry);
8101 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8115 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8116 return CallResult.second;
8121 bool isVol,
bool isTailCall,
8130 if (ConstantSize->
isZero())
8134 *
this, dl, Chain, Dst, Src, ConstantSize->
getZExtValue(), Alignment,
8135 isVol,
false, DstPtrInfo, SrcPtrInfo, AAInfo);
8136 if (Result.getNode())
8145 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
8146 if (Result.getNode())
8160 Entry.Node = Dst; Args.push_back(Entry);
8161 Entry.Node = Src; Args.push_back(Entry);
8164 Entry.Node =
Size; Args.push_back(Entry);
8170 Dst.getValueType().getTypeForEVT(*
getContext()),
8177 std::pair<SDValue,SDValue> CallResult = TLI->
LowerCallTo(CLI);
8178 return CallResult.second;
8183 Type *SizeTy,
unsigned ElemSz,
8192 Args.push_back(Entry);
8195 Args.push_back(Entry);
8199 Args.push_back(Entry);
8203 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8217 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8218 return CallResult.second;
8223 bool isVol,
bool AlwaysInline,
bool isTailCall,
8231 if (ConstantSize->
isZero())
8236 isVol,
false, DstPtrInfo, AAInfo);
8238 if (Result.getNode())
8246 *
this, dl, Chain, Dst, Src,
Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
8247 if (Result.getNode())
8254 assert(ConstantSize &&
"AlwaysInline requires a constant size!");
8257 isVol,
true, DstPtrInfo, AAInfo);
8259 "getMemsetStores must return a valid sequence when AlwaysInline");
8276 const auto CreateEntry = [](
SDValue Node,
Type *Ty) {
8287 Args.push_back(CreateEntry(
Size,
DL.getIntPtrType(Ctx)));
8294 Args.push_back(CreateEntry(Src, Src.getValueType().getTypeForEVT(Ctx)));
8295 Args.push_back(CreateEntry(
Size,
DL.getIntPtrType(Ctx)));
8297 Dst.getValueType().getTypeForEVT(Ctx),
8305 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8306 return CallResult.second;
8311 Type *SizeTy,
unsigned ElemSz,
8319 Args.push_back(Entry);
8323 Args.push_back(Entry);
8327 Args.push_back(Entry);
8331 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
8345 std::pair<SDValue, SDValue> CallResult = TLI->
LowerCallTo(CLI);
8346 return CallResult.second;
8358 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8359 cast<AtomicSDNode>(E)->refineAlignment(MMO);
8364 VTList, MemVT, MMO);
8365 createOperands(
N, Ops);
8367 CSEMap.InsertNode(
N, IP);
8381 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
8406 "Invalid Atomic Op");
8413 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
8423 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
8428 if (Ops.
size() == 1)
8443 if (
Size.hasValue() && !
Size.getValue())
8460 (Opcode <= (
unsigned)std::numeric_limits<int>::max() &&
8462 "Opcode is not a memory-accessing opcode!");
8466 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
8469 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
8470 Opcode, dl.
getIROrder(), VTList, MemVT, MMO));
8475 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8476 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
8481 VTList, MemVT, MMO);
8482 createOperands(
N, Ops);
8484 CSEMap.InsertNode(
N, IP);
8487 VTList, MemVT, MMO);
8488 createOperands(
N, Ops);
8497 SDValue Chain,
int FrameIndex,
8509 ID.AddInteger(FrameIndex);
8513 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
8518 createOperands(
N, Ops);
8519 CSEMap.InsertNode(
N, IP);
8534 ID.AddInteger(Guid);
8537 if (
SDNode *E = FindNodeOrInsertPos(
ID, Dl, IP))
8540 auto *
N = newSDNode<PseudoProbeSDNode>(
8542 createOperands(
N, Ops);
8543 CSEMap.InsertNode(
N, IP);
8564 !isa<ConstantSDNode>(
Ptr.getOperand(1)) ||
8565 !isa<FrameIndexSDNode>(
Ptr.getOperand(0)))
8568 int FI = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
8571 Offset + cast<ConstantSDNode>(
Ptr.getOperand(1))->getSExtValue());
8582 if (
ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
8597 "Invalid chain type");
8609 Alignment, AAInfo, Ranges);
8620 assert(VT == MemVT &&
"Non-extending load from different memory type!");
8624 "Should only be an extending load, not truncating!");
8626 "Cannot convert from FP to Int or Int -> FP!");
8628 "Cannot use an ext load to convert to or from a vector!");
8631 "Cannot use an ext load to change the number of vector elements!");
8643 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
8644 dl.
getIROrder(), VTs, AM, ExtType, MemVT, MMO));
8648 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8649 cast<LoadSDNode>(E)->refineAlignment(MMO);
8653 ExtType, MemVT, MMO);
8654 createOperands(
N, Ops);
8656 CSEMap.InsertNode(
N, IP);
8670 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
8688 MemVT, Alignment, MMOFlags, AAInfo);
8703 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
8706 LD->getMemOperand()->getFlags() &
8709 LD->getChain(),
Base,
Offset, LD->getPointerInfo(),
8710 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
8736 "Invalid chain type");
8744 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
8749 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8750 cast<StoreSDNode>(E)->refineAlignment(MMO);
8755 createOperands(
N, Ops);
8757 CSEMap.InsertNode(
N, IP);
8770 "Invalid chain type");
8791 "Invalid chain type");
8796 "Should only be a truncating store, not extending!");
8798 "Can't do FP-INT conversion!");
8800 "Cannot use trunc store to convert to or from a vector!");
8803 "Cannot use trunc store to change the number of vector elements!");
8811 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
8816 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8817 cast<StoreSDNode>(E)->refineAlignment(MMO);
8822 createOperands(
N, Ops);
8824 CSEMap.InsertNode(
N, IP);
8835 assert(ST->getOffset().isUndef() &&
"Store is already a indexed store!");
8840 ID.AddInteger(ST->getMemoryVT().getRawBits());
8841 ID.AddInteger(ST->getRawSubclassData());
8842 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
8843 ID.AddInteger(ST->getMemOperand()->getFlags());
8845 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
8849 ST->isTruncatingStore(), ST->getMemoryVT(),
8850 ST->getMemOperand());
8851 createOperands(
N, Ops);
8853 CSEMap.InsertNode(
N, IP);
8865 const MDNode *Ranges,
bool IsExpanding) {
8878 Alignment, AAInfo, Ranges);
8879 return getLoadVP(AM, ExtType, VT, dl, Chain,
Ptr,
Offset, Mask, EVL, MemVT,
8898 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
8899 dl.
getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
8903 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8904 cast<VPLoadSDNode>(E)->refineAlignment(MMO);
8908 ExtType, IsExpanding, MemVT, MMO);
8909 createOperands(
N, Ops);
8911 CSEMap.InsertNode(
N, IP);
8927 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
8936 Mask, EVL, VT, MMO, IsExpanding);
8945 const AAMDNodes &AAInfo,
bool IsExpanding) {
8948 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo,
nullptr,
8958 EVL, MemVT, MMO, IsExpanding);
8964 auto *LD = cast<VPLoadSDNode>(OrigLoad);
8965 assert(LD->getOffset().isUndef() &&
"Load is already a indexed load!");
8968 LD->getMemOperand()->getFlags() &
8972 LD->getVectorLength(), LD->getPointerInfo(),
8973 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
8974 nullptr, LD->isExpandingLoad());
8981 bool IsCompressing) {
8991 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
8992 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
8996 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
8997 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
9001 IsTruncating, IsCompressing, MemVT, MMO);
9002 createOperands(
N, Ops);
9004 CSEMap.InsertNode(
N, IP);
9017 bool IsCompressing) {
9038 bool IsCompressing) {
9045 false, IsCompressing);
9048 "Should only be a truncating store, not extending!");
9051 "Cannot use trunc store to convert to or from a vector!");
9054 "Cannot use trunc store to change the number of vector elements!");
9058 SDValue Ops[] = {Chain, Val,
Ptr, Undef, Mask, EVL};
9062 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
9067 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9068 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
9074 createOperands(
N, Ops);
9076 CSEMap.InsertNode(
N, IP);
9086 auto *ST = cast<VPStoreSDNode>(OrigStore);
9087 assert(ST->getOffset().isUndef() &&
"Store is already an indexed store!");
9089 SDValue Ops[] = {ST->getChain(), ST->getValue(),
Base,
9090 Offset, ST->getMask(), ST->getVectorLength()};
9093 ID.AddInteger(ST->getMemoryVT().getRawBits());
9094 ID.AddInteger(ST->getRawSubclassData());
9095 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
9096 ID.AddInteger(ST->getMemOperand()->getFlags());
9098 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9101 auto *
N = newSDNode<VPStoreSDNode>(
9103 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
9104 createOperands(
N, Ops);
9106 CSEMap.InsertNode(
N, IP);
9126 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
9127 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
9131 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9132 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
9137 newSDNode<VPStridedLoadSDNode>(
DL.getIROrder(),
DL.getDebugLoc(), VTs, AM,
9138 ExtType, IsExpanding, MemVT, MMO);
9139 createOperands(
N, Ops);
9140 CSEMap.InsertNode(
N, IP);
9154 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
9163 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
9172 bool IsTruncating,
bool IsCompressing) {
9182 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
9183 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9186 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9187 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
9190 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
9191 VTs, AM, IsTruncating,
9192 IsCompressing, MemVT, MMO);
9193 createOperands(
N, Ops);
9195 CSEMap.InsertNode(
N, IP);
9207 bool IsCompressing) {
9214 false, IsCompressing);
9217 "Should only be a truncating store, not extending!");
9220 "Cannot use trunc store to convert to or from a vector!");
9223 "Cannot use trunc store to change the number of vector elements!");
9227 SDValue Ops[] = {Chain, Val,
Ptr, Undef, Stride, Mask, EVL};
9231 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
9235 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
9236 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
9239 auto *
N = newSDNode<VPStridedStoreSDNode>(
DL.getIROrder(),
DL.getDebugLoc(),
9241 IsCompressing, SVT, MMO);
9242 createOperands(
N, Ops);
9244 CSEMap.InsertNode(
N, IP);
9254 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
9259 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
9264 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9265 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
9270 VT, MMO, IndexType);
9271 createOperands(
N, Ops);
9273 assert(
N->getMask().getValueType().getVectorElementCount() ==
9274 N->getValueType(0).getVectorElementCount() &&
9275 "Vector width mismatch between mask and data");
9276 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9277 N->getValueType(0).getVectorElementCount().isScalable() &&
9278 "Scalable flags of index and data do not match");
9280 N->getIndex().getValueType().getVectorElementCount(),
9281 N->getValueType(0).getVectorElementCount()) &&
9282 "Vector width mismatch between index and data");
9283 assert(isa<ConstantSDNode>(
N->getScale()) &&
9284 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9285 "Scale should be a constant power of 2");
9287 CSEMap.InsertNode(
N, IP);
9298 assert(Ops.
size() == 7 &&
"Incompatible number of operands");
9303 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
9308 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9309 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
9313 VT, MMO, IndexType);
9314 createOperands(
N, Ops);
9316 assert(
N->getMask().getValueType().getVectorElementCount() ==
9317 N->getValue().getValueType().getVectorElementCount() &&
9318 "Vector width mismatch between mask and data");
9320 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9321 N->getValue().getValueType().getVectorElementCount().isScalable() &&
9322 "Scalable flags of index and data do not match");
9324 N->getIndex().getValueType().getVectorElementCount(),
9325 N->getValue().getValueType().getVectorElementCount()) &&
9326 "Vector width mismatch between index and data");
9327 assert(isa<ConstantSDNode>(
N->getScale()) &&
9328 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9329 "Scale should be a constant power of 2");
9331 CSEMap.InsertNode(
N, IP);
9346 "Unindexed masked load with an offset!");
9353 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
9354 dl.
getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
9358 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9359 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
9363 AM, ExtTy, isExpanding, MemVT, MMO);
9364 createOperands(
N, Ops);
9366 CSEMap.InsertNode(
N, IP);
9377 assert(LD->getOffset().isUndef() &&
"Masked load is already a indexed load!");
9379 Offset, LD->getMask(), LD->getPassThru(),
9380 LD->getMemoryVT(), LD->getMemOperand(), AM,
9381 LD->getExtensionType(), LD->isExpandingLoad());
9389 bool IsCompressing) {
9391 "Invalid chain type");
9394 "Unindexed masked store with an offset!");
9401 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
9402 dl.
getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
9406 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9407 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
9412 IsTruncating, IsCompressing, MemVT, MMO);
9413 createOperands(
N, Ops);
9415 CSEMap.InsertNode(
N, IP);
9426 assert(ST->getOffset().isUndef() &&
9427 "Masked store is already a indexed store!");
9429 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
9430 AM, ST->isTruncatingStore(), ST->isCompressingStore());
9438 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
9443 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
9444 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
9448 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9449 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
9454 VTs, MemVT, MMO, IndexType, ExtTy);
9455 createOperands(
N, Ops);
9457 assert(
N->getPassThru().getValueType() ==
N->getValueType(0) &&
9458 "Incompatible type of the PassThru value in MaskedGatherSDNode");
9459 assert(
N->getMask().getValueType().getVectorElementCount() ==
9460 N->getValueType(0).getVectorElementCount() &&
9461 "Vector width mismatch between mask and data");
9462 assert(
N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9463 N->getValueType(0).getVectorElementCount().isScalable() &&
9464 "Scalable flags of index and data do not match");
9466 N->getIndex().getValueType().getVectorElementCount(),
9467 N->getValueType(0).getVectorElementCount()) &&
9468 "Vector width mismatch between index and data");
9469 assert(isa<ConstantSDNode>(
N->getScale()) &&
9470 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9471 "Scale should be a constant power of 2");
9473 CSEMap.InsertNode(
N, IP);
9485 assert(Ops.
size() == 6 &&
"Incompatible number of operands");
9490 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
9491 dl.
getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
9495 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP)) {
9496 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
9501 VTs, MemVT, MMO, IndexType, IsTrunc);
9502 createOperands(
N, Ops);
9504 assert(
N->getMask().getValueType().getVectorElementCount() ==
9505 N->getValue().getValueType().getVectorElementCount() &&
9506 "Vector width mismatch between mask and data");
9508 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
9509 N->getValue().getValueType().getVectorElementCount().isScalable() &&
9510 "Scalable flags of index and data do not match");
9512 N->getIndex().getValueType().getVectorElementCount(),
9513 N->getValue().getValueType().getVectorElementCount()) &&
9514 "Vector width mismatch between index and data");
9515 assert(isa<ConstantSDNode>(
N->getScale()) &&
9516 N->getScale()->getAsAPIntVal().isPowerOf2() &&
9517 "Scale should be a constant power of 2");
9519 CSEMap.InsertNode(
N, IP);
9534 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
9539 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9544 createOperands(
N, Ops);
9546 CSEMap.InsertNode(
N, IP);
9561 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
9566 if (
SDNode *E = FindNodeOrInsertPos(
ID, dl, IP))
9571 createOperands(
N, Ops);
9573 CSEMap.InsertNode(
N, IP);
9593 if (
auto *CondC = dyn_cast<ConstantSDNode>(
Cond))
9594 return CondC->isZero() ?
F :
T;
9600 if (CondC->isZero())
9626 return !Val || Val->getAPIntValue().uge(
X.getScalarValueSizeInBits());
9632 if (
X.getValueType().getScalarType() == MVT::i1)
9645 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
9647 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
9650 if (Flags.hasNoNaNs() && (HasNan ||
X.isUndef() ||
Y.isUndef()))
9653 if (Flags.hasNoInfs() && (HasInf ||
X.isUndef() ||
Y.isUndef()))
9676 if (Opcode ==
ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
9691 switch (Ops.
size()) {
9693 case 1:
return getNode(Opcode,
DL, VT,
static_cast<const SDValue>(Ops[0]));
9694 case 2:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1]);
9695 case 3:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Ops[2]);
9710 return getNode(Opcode,
DL, VT, Ops, Flags);
9715 unsigned NumOps = Ops.
size();
9718 case 1:
return getNode(Opcode,
DL, VT, Ops[0], Flags);
9719 case 2:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Flags);
9720 case 3:
return getNode(Opcode,
DL, VT, Ops[0], Ops[1], Ops[2], Flags);
9725 for (
const auto &
Op : Ops)
9727 "Operand is DELETED_NODE!");
9742 assert(NumOps == 5 &&
"SELECT_CC takes 5 operands!");
9744 "LHS and RHS of condition must have same type!");
9746 "True and False arms of SelectCC must have same type!");
9748 "select_cc node must be of same type as true and false value!");
9752 "Expected select_cc with vector result to have the same sized "
9753 "comparison type!");
9756 assert(NumOps == 5 &&
"BR_CC takes 5 operands!");
9758 "LHS/RHS of comparison should match types!");
9764 Opcode = ISD::VP_XOR;
9769 Opcode = ISD::VP_AND;
9771 case ISD::VP_REDUCE_MUL:
9774 Opcode = ISD::VP_REDUCE_AND;
9776 case ISD::VP_REDUCE_ADD:
9779 Opcode = ISD::VP_REDUCE_XOR;
9781 case ISD::VP_REDUCE_SMAX:
9782 case ISD::VP_REDUCE_UMIN:
9786 Opcode = ISD::VP_REDUCE_AND;
9788 case ISD::VP_REDUCE_SMIN:
9789 case ISD::VP_REDUCE_UMAX:
9793 Opcode = ISD::VP_REDUCE_OR;
9801 if (VT != MVT::Glue) {
9806 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
9809 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
9810 createOperands(
N, Ops);
9812 CSEMap.InsertNode(
N, IP);
9814 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
9815 createOperands(
N, Ops);
9835 return getNode(Opcode,
DL, VTList, Ops, Flags);
9844 for (
const auto &
Op : Ops)
9846 "Operand is DELETED_NODE!");
9855 "Invalid add/sub overflow op!");
9857 Ops[0].getValueType() == Ops[1].getValueType() &&
9858 Ops[0].getValueType() == VTList.
VTs[0] &&
9859 "Binary operator types must match!");
9860 SDValue N1 = Ops[0], N2 = Ops[1];
9866 if (N2CV && N2CV->
isZero()) {
9897 VTList.
VTs[0] == Ops[0].getValueType() &&
9898 VTList.
VTs[0] == Ops[1].getValueType() &&
9899 "Binary operator types must match!");
9905 unsigned OutWidth = Width * 2;
9909 Val = Val.
sext(OutWidth);
9910 Mul =
Mul.sext(OutWidth);
9912 Val = Val.
zext(OutWidth);
9913 Mul =
Mul.zext(OutWidth);
9927 VTList.
VTs[0] == Ops[0].getValueType() &&
"frexp type mismatch");
9943 "Invalid STRICT_FP_EXTEND!");
9945 Ops[1].getValueType().isFloatingPoint() &&
"Invalid FP cast!");
9947 "STRICT_FP_EXTEND result type should be vector iff the operand "
9951 Ops[1].getValueType().getVectorElementCount()) &&
9952 "Vector element count mismatch!");
9954 "Invalid fpext node, dst <= src!");
9957 assert(VTList.
NumVTs == 2 && Ops.
size() == 3 &&
"Invalid STRICT_FP_ROUND!");
9959 "STRICT_FP_ROUND result type should be vector iff the operand "
9963 Ops[1].getValueType().getVectorElementCount()) &&
9964 "Vector element count mismatch!");
9966 Ops[1].getValueType().isFloatingPoint() &&
9967 VTList.
VTs[0].
bitsLT(Ops[1].getValueType()) &&
9968 isa<ConstantSDNode>(Ops[2]) &&
9969 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
9970 "Invalid STRICT_FP_ROUND!");
9980 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
9981 return getNode(Opcode,
DL, VT, N1, N2, N3.getOperand(0));
9982 else if (N3.getOpcode() ==
ISD::AND)
9983 if (
ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
9987 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
9988 return getNode(Opcode,
DL, VT, N1, N2, N3.getOperand(0));
9996 if (VTList.
VTs[VTList.
NumVTs-1] != MVT::Glue) {
10000 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP))
10003 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
10004 createOperands(
N, Ops);
10005 CSEMap.InsertNode(
N, IP);
10007 N = newSDNode<SDNode>(Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTList);
10008 createOperands(
N, Ops);
10011 N->setFlags(Flags);
10020 return getNode(Opcode,
DL, VTList, std::nullopt);
10026 return getNode(Opcode,
DL, VTList, Ops);
10032 return getNode(Opcode,
DL, VTList, Ops);
10037 SDValue Ops[] = { N1, N2, N3 };
10038 return getNode(Opcode,
DL, VTList, Ops);
10043 SDValue Ops[] = { N1, N2, N3, N4 };
10044 return getNode(Opcode,
DL, VTList, Ops);
10050 SDValue Ops[] = { N1, N2, N3, N4, N5 };
10051 return getNode(Opcode,
DL, VTList, Ops);
10055 return makeVTList(SDNode::getValueTypeList(VT), 1);
10064 void *IP =
nullptr;
10070 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 2);
10071 VTListMap.InsertNode(Result, IP);
10073 return Result->getSDVTList();
10083 void *IP =
nullptr;
10090 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 3);
10091 VTListMap.InsertNode(Result, IP);
10093 return Result->getSDVTList();
10104 void *IP =
nullptr;
10112 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, 4);
10113 VTListMap.InsertNode(Result, IP);
10115 return Result->getSDVTList();
10119 unsigned NumVTs = VTs.
size();
10121 ID.AddInteger(NumVTs);
10122 for (
unsigned index = 0; index < NumVTs; index++) {
10123 ID.AddInteger(VTs[index].getRawBits());
10126 void *IP =
nullptr;
10131 Result =
new (Allocator)
SDVTListNode(
ID.Intern(Allocator), Array, NumVTs);
10132 VTListMap.InsertNode(Result, IP);
10134 return Result->getSDVTList();
10145 assert(
N->getNumOperands() == 1 &&
"Update with wrong number of operands");
10148 if (
Op ==
N->getOperand(0))
return N;
10151 void *InsertPos =
nullptr;
10152 if (
SDNode *Existing = FindModifiedNodeSlot(
N,
Op, InsertPos))
10157 if (!RemoveNodeFromCSEMaps(
N))
10158 InsertPos =
nullptr;
10161 N->OperandList[0].set(
Op);
10165 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
10170 assert(
N->getNumOperands() == 2 &&
"Update with wrong number of operands");
10173 if (Op1 ==
N->getOperand(0) && Op2 ==
N->getOperand(1))
10177 void *InsertPos =
nullptr;
10178 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Op1, Op2, InsertPos))
10183 if (!RemoveNodeFromCSEMaps(
N))
10184 InsertPos =
nullptr;
10187 if (
N->OperandList[0] != Op1)
10188 N->OperandList[0].set(Op1);
10189 if (
N->OperandList[1] != Op2)
10190 N->OperandList[1].set(Op2);
10194 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
10200 SDValue Ops[] = { Op1, Op2, Op3 };
10207 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
10214 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
10220 unsigned NumOps = Ops.
size();
10221 assert(
N->getNumOperands() == NumOps &&
10222 "Update with wrong number of operands");
10225 if (std::equal(Ops.
begin(), Ops.
end(),
N->op_begin()))
10229 void *InsertPos =
nullptr;
10230 if (
SDNode *Existing = FindModifiedNodeSlot(
N, Ops, InsertPos))
10235 if (!RemoveNodeFromCSEMaps(
N))
10236 InsertPos =
nullptr;
10239 for (
unsigned i = 0; i != NumOps; ++i)
10240 if (
N->OperandList[i] != Ops[i])
10241 N->OperandList[i].set(Ops[i]);
10245 if (InsertPos) CSEMap.InsertNode(
N, InsertPos);
10262 if (NewMemRefs.
empty()) {
10268 if (NewMemRefs.
size() == 1) {
10269 N->MemRefs = NewMemRefs[0];
10275 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.
size());
10277 N->MemRefs = MemRefsBuffer;
10278 N->NumMemRefs =
static_cast<int>(NewMemRefs.
size());
10301 SDValue Ops[] = { Op1, Op2 };
10309 SDValue Ops[] = { Op1, Op2, Op3 };
10342 SDValue Ops[] = { Op1, Op2 };
10350 New->setNodeId(-1);
10370 unsigned Order = std::min(
N->getIROrder(), OLoc.
getIROrder());
10371 N->setIROrder(Order);
10394 void *IP =
nullptr;
10395 if (VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue) {
10399 return UpdateSDLocOnMergeSDNode(ON,
SDLoc(
N));
10402 if (!RemoveNodeFromCSEMaps(
N))
10407 N->ValueList = VTs.
VTs;
10417 if (Used->use_empty())
10418 DeadNodeSet.
insert(Used);
10423 MN->clearMemRefs();
10427 createOperands(
N, Ops);
10431 if (!DeadNodeSet.
empty()) {
10433 for (
SDNode *
N : DeadNodeSet)
10434 if (
N->use_empty())
10440 CSEMap.InsertNode(
N, IP);
10445 unsigned OrigOpc = Node->getOpcode();
10450#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
10451 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
10452#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
10453 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
10454#include "llvm/IR/ConstrainedOps.def"
10457 assert(Node->getNumValues() == 2 &&
"Unexpected number of results!");
10460 SDValue InputChain = Node->getOperand(0);
10465 for (
unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
10508 SDValue Ops[] = { Op1, Op2 };
10516 SDValue Ops[] = { Op1, Op2, Op3 };
10530 SDValue Ops[] = { Op1, Op2 };
10538 SDValue Ops[] = { Op1, Op2, Op3 };
10553 SDValue Ops[] = { Op1, Op2 };
10562 SDValue Ops[] = { Op1, Op2, Op3 };
10583 bool DoCSE = VTs.
VTs[VTs.
NumVTs-1] != MVT::Glue;
10585 void *IP =
nullptr;
10591 if (
SDNode *E = FindNodeOrInsertPos(
ID,
DL, IP)) {
10592 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E,
DL));
10597 N = newSDNode<MachineSDNode>(~Opcode,
DL.getIROrder(),
DL.getDebugLoc(), VTs);
10598 createOperands(
N, Ops);
10601 CSEMap.InsertNode(
N, IP);
10614 VT, Operand, SRIdxVal);
10624 VT, Operand, Subreg, SRIdxVal);
10641 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
10644 void *IP =
nullptr;
10646 E->intersectFlagsWith(Flags);
10656 if (VTList.
VTs[VTList.
NumVTs - 1] != MVT::Glue) {
10659 void *IP =
nullptr;
10660 if (FindNodeOrInsertPos(
ID,
SDLoc(), IP))
10670 SDNode *
N,
unsigned R,
bool IsIndirect,
10672 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
10673 "Expected inlined-at fields to agree");
10676 {}, IsIndirect,
DL, O,
10685 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
10686 "Expected inlined-at fields to agree");
10699 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
10700 "Expected inlined-at fields to agree");
10711 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
10712 "Expected inlined-at fields to agree");
10715 Dependencies, IsIndirect,
DL, O,
10721 unsigned VReg,
bool IsIndirect,
10723 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
10724 "Expected inlined-at fields to agree");
10727 {}, IsIndirect,
DL, O,
10735 unsigned O,
bool IsVariadic) {
10736 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(
DL) &&
10737 "Expected inlined-at fields to agree");
10740 DL, O, IsVariadic);
10744 unsigned OffsetInBits,
unsigned SizeInBits,
10745 bool InvalidateDbg) {
10748 assert(FromNode && ToNode &&
"Can't modify dbg values");
10753 if (
From == To || FromNode == ToNode)
10765 if (Dbg->isInvalidated())
10772 bool Changed =
false;
10773 auto NewLocOps = Dbg->copyLocationOps();
10775 NewLocOps.begin(), NewLocOps.end(),
10777 bool Match = Op == FromLocOp;
10787 auto *Expr = Dbg->getExpression();
10793 if (
auto FI = Expr->getFragmentInfo())
10794 if (OffsetInBits + SizeInBits > FI->SizeInBits)
10803 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
10806 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
10807 Dbg->getDebugLoc(), std::max(ToNode->
getIROrder(), Dbg->getOrder()),
10808 Dbg->isVariadic());
10811 if (InvalidateDbg) {
10813 Dbg->setIsInvalidated();
10814 Dbg->setIsEmitted();
10820 "Transferred DbgValues should depend on the new SDNode");
10826 if (!
N.getHasDebugValue())
10831 if (DV->isInvalidated())
10833 switch (
N.getOpcode()) {
10839 if (!isa<ConstantSDNode>(N0)) {
10840 bool RHSConstant = isa<ConstantSDNode>(N1);
10843 Offset =
N.getConstantOperandVal(1);
10846 if (!RHSConstant && DV->isIndirect())
10853 auto *DIExpr = DV->getExpression();
10854 auto NewLocOps = DV->copyLocationOps();
10855 bool Changed =
false;
10856 size_t OrigLocOpsSize = NewLocOps.size();
10857 for (
size_t i = 0; i < OrigLocOpsSize; ++i) {
10862 NewLocOps[i].getSDNode() != &
N)
10873 const auto *TmpDIExpr =
10881 NewLocOps.push_back(
RHS);
10887 assert(Changed &&
"Salvage target doesn't use N");
10890 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
10892 auto AdditionalDependencies = DV->getAdditionalDependencies();
10894 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
10895 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
10897 DV->setIsInvalidated();
10898 DV->setIsEmitted();
10900 N0.
getNode()->dumprFull(
this);
10901 dbgs() <<
" into " << *DIExpr <<
'\n');
10908 TypeSize ToSize =
N.getValueSizeInBits(0);
10912 auto NewLocOps = DV->copyLocationOps();
10913 bool Changed =
false;
10914 for (
size_t i = 0; i < NewLocOps.size(); ++i) {
10916 NewLocOps[i].getSDNode() != &
N)
10923 assert(Changed &&
"Salvage target doesn't use N");
10928 DV->getAdditionalDependencies(), DV->isIndirect(),
10929 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
10932 DV->setIsInvalidated();
10933 DV->setIsEmitted();
10935 dbgs() <<
" into " << *DbgExpression <<
'\n');
10942 assert(!Dbg->getSDNodes().empty() &&
10943 "Salvaged DbgValue should depend on a new SDNode");
10951 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(
DL) &&
10952 "Expected inlined-at fields to agree");
10968 while (UI != UE &&
N == *UI)
10976 :
SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
10989 "Cannot replace with this method!");
11005 RAUWUpdateListener Listener(*
this, UI, UE);
11010 RemoveNodeFromCSEMaps(
User);
11022 }
while (UI != UE && *UI ==
User);
11025 AddModifiedNodeToCSEMaps(
User);
11041 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i)
11044 "Cannot use this version of ReplaceAllUsesWith!");
11052 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i)
11053 if (
From->hasAnyUseOfValue(i)) {
11054 assert((i < To->getNumValues()) &&
"Invalid To location");
11063 RAUWUpdateListener Listener(*
this, UI, UE);
11068 RemoveNodeFromCSEMaps(
User);
11080 }
while (UI != UE && *UI ==
User);
11084 AddModifiedNodeToCSEMaps(
User);
11098 if (
From->getNumValues() == 1)
11101 for (
unsigned i = 0, e =
From->getNumValues(); i != e; ++i) {
11111 RAUWUpdateListener Listener(*
this, UI, UE);
11116 RemoveNodeFromCSEMaps(
User);
11122 bool To_IsDivergent =
false;
11129 }
while (UI != UE && *UI ==
User);
11131 if (To_IsDivergent !=
From->isDivergent())
11136 AddModifiedNodeToCSEMaps(
User);
11149 if (
From == To)
return;
11152 if (
From.getNode()->getNumValues() == 1) {
11164 UE =
From.getNode()->use_end();
11165 RAUWUpdateListener Listener(*
this, UI, UE);
11168 bool UserRemovedFromCSEMaps =
false;
11178 if (
Use.getResNo() !=
From.getResNo()) {
11185 if (!UserRemovedFromCSEMaps) {
11186 RemoveNodeFromCSEMaps(
User);
11187 UserRemovedFromCSEMaps =
true;
11194 }
while (UI != UE && *UI ==
User);
11197 if (!UserRemovedFromCSEMaps)
11202 AddModifiedNodeToCSEMaps(
User);
11221bool operator<(
const UseMemo &L,
const UseMemo &R) {
11222 return (intptr_t)L.User < (intptr_t)R.User;
11232 for (UseMemo &Memo :
Uses)
11233 if (Memo.User ==
N)
11234 Memo.User =
nullptr;
11247 "Conflicting divergence information!");
11252 for (
const auto &
Op :
N->ops()) {
11253 if (
Op.Val.getValueType() != MVT::Other &&
Op.getNode()->isDivergent())
11264 if (
N->SDNodeBits.IsDivergent != IsDivergent) {
11265 N->SDNodeBits.IsDivergent = IsDivergent;
11268 }
while (!Worklist.
empty());
11271void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
11273 Order.
reserve(AllNodes.size());
11275 unsigned NOps =
N.getNumOperands();
11278 Order.push_back(&
N);
11280 for (
size_t I = 0;
I != Order.size(); ++
I) {
11282 for (
auto *U :
N->uses()) {
11283 unsigned &UnsortedOps = Degree[U];
11284 if (0 == --UnsortedOps)
11285 Order.push_back(U);
11292 std::vector<SDNode *> TopoOrder;
11293 CreateTopologicalOrder(TopoOrder);
11294 for (
auto *
N : TopoOrder) {
11296 "Divergence bit inconsistency detected");
11319 for (
unsigned i = 0; i != Num; ++i) {
11320 unsigned FromResNo =
From[i].getResNo();
11323 E = FromNode->
use_end(); UI != E; ++UI) {
11325 if (
Use.getResNo() == FromResNo) {
11326 UseMemo Memo = { *UI, i, &
Use };
11327 Uses.push_back(Memo);
11334 RAUOVWUpdateListener Listener(*
this,
Uses);
11336 for (
unsigned UseIndex = 0, UseIndexEnd =
Uses.size();
11337 UseIndex != UseIndexEnd; ) {
11343 if (
User ==
nullptr) {
11349 RemoveNodeFromCSEMaps(
User);
11356 unsigned i =
Uses[UseIndex].Index;
11361 }
while (UseIndex != UseIndexEnd &&
Uses[UseIndex].
User ==
User);
11365 AddModifiedNodeToCSEMaps(
User);
11373 unsigned DAGSize = 0;
11389 unsigned Degree =
N.getNumOperands();
11392 N.setNodeId(DAGSize++);
11394 if (Q != SortedPos)
11395 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
11396 assert(SortedPos != AllNodes.end() &&
"Overran node list");
11400 N.setNodeId(Degree);
11412 unsigned Degree =
P->getNodeId();
11413 assert(Degree != 0 &&
"Invalid node degree");
11417 P->setNodeId(DAGSize++);
11418 if (
P->getIterator() != SortedPos)
11419 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(
P));
11420 assert(SortedPos != AllNodes.end() &&
"Overran node list");
11424 P->setNodeId(Degree);
11427 if (Node.getIterator() == SortedPos) {
11431 dbgs() <<
"Overran sorted position:\n";
11433 dbgs() <<
"Checking if this is due to cycles\n";
11440 assert(SortedPos == AllNodes.end() &&
11441 "Topological sort incomplete!");
11443 "First node in topological sort is not the entry token!");
11444 assert(AllNodes.front().getNodeId() == 0 &&
11445 "First node in topological sort has non-zero id!");
11446 assert(AllNodes.front().getNumOperands() == 0 &&
11447 "First node in topological sort has operands!");
11448 assert(AllNodes.back().getNodeId() == (
int)DAGSize-1 &&
11449 "Last node in topologic sort has unexpected id!");
11450 assert(AllNodes.back().use_empty() &&
11451 "Last node in topologic sort has users!");
11459 for (
SDNode *SD : DB->getSDNodes()) {
11463 SD->setHasDebugValue(
true);
11465 DbgInfo->
add(DB, isParameter);
11472 assert(isa<MemSDNode>(NewMemOpChain) &&
"Expected a memop node");
11478 if (OldChain == NewMemOpChain || OldChain.
use_empty())
11479 return NewMemOpChain;
11482 OldChain, NewMemOpChain);
11485 return TokenFactor;
11490 assert(isa<MemSDNode>(NewMemOp.
getNode()) &&
"Expected a memop node");
11498 assert(isa<ExternalSymbolSDNode>(
Op) &&
"Node should be an ExternalSymbol");
11500 auto *Symbol = cast<ExternalSymbolSDNode>(
Op)->getSymbol();
11504 if (OutFunction !=
nullptr)
11512 std::string ErrorStr;
11514 ErrorFormatter <<
"Undefined external symbol ";
11515 ErrorFormatter <<
'"' << Symbol <<
'"';
11525 return Const !=
nullptr && Const->isZero();
11530 return Const !=
nullptr && Const->isZero() && !Const->isNegative();
11535 return Const !=
nullptr && Const->isAllOnes();
11540 return Const !=
nullptr && Const->isOne();
11545 return Const !=
nullptr && Const->isMinSignedValue();
11549 unsigned OperandNo) {
11558 return Const->isZero();
11560 return Const->isOne();
11563 return Const->isAllOnes();
11565 return Const->isMinSignedValue();
11567 return Const->isMaxSignedValue();
11572 return OperandNo == 1 && Const->isZero();
11575 return OperandNo == 1 && Const->isOne();
11580 return ConstFP->isZero() &&
11581 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
11583 return OperandNo == 1 && ConstFP->isZero() &&
11584 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
11586 return ConstFP->isExactlyValue(1.0);
11588 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
11592 EVT VT = V.getValueType();
11594 APFloat NeutralAF = !Flags.hasNoNaNs()
11596 : !Flags.hasNoInfs()
11602 return ConstFP->isExactlyValue(NeutralAF);
11611 V = V.getOperand(0);
11616 while (V.getOpcode() ==
ISD::BITCAST && V.getOperand(0).hasOneUse())
11617 V = V.getOperand(0);
11623 V = V.getOperand(0);
11629 V = V.getOperand(0);
11637 unsigned NumBits = V.getScalarValueSizeInBits();
11640 return C && (
C->getAPIntValue().countr_one() >= NumBits);
11644 bool AllowTruncation) {
11645 EVT VT =
N.getValueType();
11654 bool AllowTruncation) {
11661 EVT VecEltVT =
N->getValueType(0).getVectorElementType();
11662 if (
auto *CN = dyn_cast<ConstantSDNode>(
N->getOperand(0))) {
11663 EVT CVT = CN->getValueType(0);
11664 assert(CVT.
bitsGE(VecEltVT) &&
"Illegal splat_vector element extension");
11665 if (AllowTruncation || CVT == VecEltVT)
11672 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
11677 if (CN && (UndefElements.
none() || AllowUndefs)) {
11679 EVT NSVT =
N.getValueType().getScalarType();
11680 assert(CVT.
bitsGE(NSVT) &&
"Illegal build vector element extension");
11681 if (AllowTruncation || (CVT == NSVT))
11690 EVT VT =
N.getValueType();
11698 const APInt &DemandedElts,
11699 bool AllowUndefs) {
11706 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
11708 if (CN && (UndefElements.
none() || AllowUndefs))
11723 return C &&
C->isZero();
11729 return C &&
C->isOne();
11734 unsigned BitWidth =
N.getScalarValueSizeInBits();
11736 return C &&
C->isAllOnes() &&
C->getValueSizeInBits(0) ==
BitWidth;
11743GlobalAddressSDNode::GlobalAddressSDNode(
unsigned Opc,
unsigned Order,
11746 int64_t o,
unsigned TF)
11747 :
SDNode(Opc, Order,
DL, getSDVTList(VT)),
Offset(o), TargetFlags(TF) {
11752 EVT VT,
unsigned SrcAS,
11754 :
SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
11755 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
11759 :
SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
11783 std::vector<EVT> VTs;
11796const EVT *SDNode::getValueTypeList(
EVT VT) {
11797 static std::set<EVT, EVT::compareRawBits> EVTs;
11798 static EVTArray SimpleVTArray;
11803 return &(*EVTs.insert(VT).first);
11817 if (UI.getUse().getResNo() ==
Value) {
11834 if (UI.getUse().getResNo() ==
Value)
11872 return any_of(
N->op_values(),
11873 [
this](
SDValue Op) { return this == Op.getNode(); });
11887 unsigned Depth)
const {
11888 if (*
this == Dest)
return true;
11892 if (
Depth == 0)
return false;
11912 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
11917 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(*
this)) {
11918 if (Ld->isUnordered())
11919 return Ld->getChain().reachesChainWithoutSideEffects(Dest,
Depth-1);
11938 bool AllowPartials) {
11947 return Op.getOpcode() ==
unsigned(BinOp);
11953 unsigned CandidateBinOp =
Op.getOpcode();
11954 if (
Op.getValueType().isFloatingPoint()) {
11956 switch (CandidateBinOp) {
11958 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
11968 auto PartialReduction = [&](
SDValue Op,
unsigned NumSubElts) {
11969 if (!AllowPartials || !
Op)
11971 EVT OpVT =
Op.getValueType();
11994 unsigned Stages =
Log2_32(
Op.getValueType().getVectorNumElements());
11996 for (
unsigned i = 0; i < Stages; ++i) {
11997 unsigned MaskEnd = (1 << i);
11999 if (
Op.getOpcode() != CandidateBinOp)
12000 return PartialReduction(PrevOp, MaskEnd);
12009 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
12016 return PartialReduction(PrevOp, MaskEnd);
12021 return PartialReduction(PrevOp, MaskEnd);
12028 while (
Op.getOpcode() == CandidateBinOp) {
12029 unsigned NumElts =
Op.getValueType().getVectorNumElements();
12037 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
12038 if (NumSrcElts != (2 * NumElts))
12053 EVT VT =
N->getValueType(0);
12062 else if (NE > ResNE)
12065 if (
N->getNumValues() == 2) {
12068 EVT VT1 =
N->getValueType(1);
12072 for (i = 0; i != NE; ++i) {
12073 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
12074 SDValue Operand =
N->getOperand(j);
12093 assert(
N->getNumValues() == 1 &&
12094 "Can't unroll a vector with multiple results!");
12100 for (i= 0; i != NE; ++i) {
12101 for (
unsigned j = 0, e =
N->getNumOperands(); j != e; ++j) {
12102 SDValue Operand =
N->getOperand(j);
12115 switch (
N->getOpcode()) {
12142 for (; i < ResNE; ++i)
12151 unsigned Opcode =
N->getOpcode();
12155 "Expected an overflow opcode");
12157 EVT ResVT =
N->getValueType(0);
12158 EVT OvVT =
N->getValueType(1);
12167 else if (NE > ResNE)
12179 for (
unsigned i = 0; i < NE; ++i) {
12180 SDValue Res =
getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
12203 if (LD->isVolatile() ||
Base->isVolatile())
12206 if (!LD->isSimple())
12208 if (LD->isIndexed() ||
Base->isIndexed())
12210 if (LD->getChain() !=
Base->getChain())
12212 EVT VT = LD->getMemoryVT();
12220 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *
this,
Offset))
12221 return (Dist * (int64_t)Bytes ==
Offset);
12230 int64_t GVOffset = 0;
12242 int FrameIdx = INT_MIN;
12243 int64_t FrameOffset = 0;
12245 FrameIdx = FI->getIndex();
12247 isa<FrameIndexSDNode>(
Ptr.getOperand(0))) {
12249 FrameIdx = cast<FrameIndexSDNode>(
Ptr.getOperand(0))->getIndex();
12250 FrameOffset =
Ptr.getConstantOperandVal(1);
12253 if (FrameIdx != INT_MIN) {
12258 return std::nullopt;
12268 "Split node must be a scalar type");
12273 return std::make_pair(
Lo,
Hi);
12286 return std::make_pair(LoVT, HiVT);
12294 bool *HiIsEmpty)
const {
12304 "Mixing fixed width and scalable vectors when enveloping a type");
12309 *HiIsEmpty =
false;
12317 return std::make_pair(LoVT, HiVT);
12322std::pair<SDValue, SDValue>
12327 "Splitting vector with an invalid mixture of fixed and scalable "
12330 N.getValueType().getVectorMinNumElements() &&
12331 "More vector elements requested than available!");
12341 return std::make_pair(
Lo,
Hi);
12348 EVT VT =
N.getValueType();
12350 "Expecting the mask to be an evenly-sized vector");
12358 return std::make_pair(
Lo,
Hi);
12363 EVT VT =
N.getValueType();
12372 unsigned Start,
unsigned Count,
12374 EVT VT =
Op.getValueType();
12377 if (EltVT ==
EVT())
12380 for (
unsigned i = Start, e = Start + Count; i != e; ++i) {
12393 return Val.MachineCPVal->getType();
12394 return Val.ConstVal->getType();
12398 unsigned &SplatBitSize,
12399 bool &HasAnyUndefs,
12400 unsigned MinSplatBits,
12401 bool IsBigEndian)
const {
12405 if (MinSplatBits > VecWidth)
12410 SplatValue =
APInt(VecWidth, 0);
12411 SplatUndef =
APInt(VecWidth, 0);
12418 assert(NumOps > 0 &&
"isConstantSplat has 0-size build vector");
12421 for (
unsigned j = 0; j < NumOps; ++j) {
12422 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
12424 unsigned BitPos = j * EltWidth;
12427 SplatUndef.
setBits(BitPos, BitPos + EltWidth);
12428 else if (
auto *CN = dyn_cast<ConstantSDNode>(OpVal))
12429 SplatValue.
insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
12430 else if (
auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
12431 SplatValue.
insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
12438 HasAnyUndefs = (SplatUndef != 0);
12441 while (VecWidth > 8) {
12446 unsigned HalfSize = VecWidth / 2;
12453 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
12454 MinSplatBits > HalfSize)
12457 SplatValue = HighValue | LowValue;
12458 SplatUndef = HighUndef & LowUndef;
12460 VecWidth = HalfSize;
12469 SplatBitSize = VecWidth;
12476 if (UndefElements) {
12477 UndefElements->
clear();
12478 UndefElements->
resize(NumOps);
12484 for (
unsigned i = 0; i != NumOps; ++i) {
12485 if (!DemandedElts[i])
12488 if (
Op.isUndef()) {
12490 (*UndefElements)[i] =
true;
12491 }
else if (!Splatted) {
12493 }
else if (Splatted !=
Op) {
12499 unsigned FirstDemandedIdx = DemandedElts.
countr_zero();
12501 "Can only have a splat without a constant for all undefs.");
12518 if (UndefElements) {
12519 UndefElements->
clear();
12520 UndefElements->
resize(NumOps);
12528 for (
unsigned I = 0;
I != NumOps; ++
I)
12530 (*UndefElements)[
I] =
true;
12533 for (
unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
12534 Sequence.append(SeqLen,
SDValue());
12535 for (
unsigned I = 0;
I != NumOps; ++
I) {
12536 if (!DemandedElts[
I])
12538 SDValue &SeqOp = Sequence[
I % SeqLen];
12540 if (
Op.isUndef()) {
12545 if (SeqOp && !SeqOp.
isUndef() && SeqOp !=
Op) {
12551 if (!Sequence.empty())
12555 assert(Sequence.empty() &&
"Failed to empty non-repeating sequence pattern");
12568 return dyn_cast_or_null<ConstantSDNode>(
12574 return dyn_cast_or_null<ConstantSDNode>(
getSplatValue(UndefElements));
12580 return dyn_cast_or_null<ConstantFPSDNode>(
12586 return dyn_cast_or_null<ConstantFPSDNode>(
getSplatValue(UndefElements));
12593 dyn_cast_or_null<ConstantFPSDNode>(
getSplatValue(UndefElements))) {
12596 const APFloat &APF = CN->getValueAPF();
12602 return IntVal.exactLogBase2();
12608 bool IsLittleEndian,
unsigned DstEltSizeInBits,
12616 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
12617 "Invalid bitcast scale");
12622 BitVector SrcUndeElements(NumSrcOps,
false);
12624 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
12626 if (
Op.isUndef()) {
12627 SrcUndeElements.
set(
I);
12630 auto *CInt = dyn_cast<ConstantSDNode>(
Op);
12631 auto *CFP = dyn_cast<ConstantFPSDNode>(
Op);
12632 assert((CInt || CFP) &&
"Unknown constant");
12633 SrcBitElements[
I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
12634 : CFP->getValueAPF().bitcastToAPInt();
12638 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
12639 SrcBitElements, UndefElements, SrcUndeElements);
12644 unsigned DstEltSizeInBits,
12649 unsigned NumSrcOps = SrcBitElements.
size();
12650 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
12651 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
12652 "Invalid bitcast scale");
12653 assert(NumSrcOps == SrcUndefElements.
size() &&
12654 "Vector size mismatch");
12656 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
12657 DstUndefElements.
clear();
12658 DstUndefElements.
resize(NumDstOps,
false);
12662 if (SrcEltSizeInBits <= DstEltSizeInBits) {
12663 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
12664 for (
unsigned I = 0;
I != NumDstOps; ++
I) {
12665 DstUndefElements.
set(
I);
12666 APInt &DstBits = DstBitElements[
I];
12667 for (
unsigned J = 0; J != Scale; ++J) {
12668 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
12669 if (SrcUndefElements[
Idx])
12671 DstUndefElements.
reset(
I);
12672 const APInt &SrcBits = SrcBitElements[
Idx];
12674 "Illegal constant bitwidths");
12675 DstBits.
insertBits(SrcBits, J * SrcEltSizeInBits);
12682 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
12683 for (
unsigned I = 0;
I != NumSrcOps; ++
I) {
12684 if (SrcUndefElements[
I]) {
12685 DstUndefElements.
set(
I * Scale, (
I + 1) * Scale);
12688 const APInt &SrcBits = SrcBitElements[
I];
12689 for (
unsigned J = 0; J != Scale; ++J) {
12690 unsigned Idx = (
I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
12691 APInt &DstBits = DstBitElements[
Idx];
12692 DstBits = SrcBits.
extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
12699 unsigned Opc =
Op.getOpcode();
12706std::optional<std::pair<APInt, APInt>>
12710 return std::nullopt;
12714 return std::nullopt;
12721 return std::nullopt;
12723 for (
unsigned i = 2; i < NumOps; ++i) {
12725 return std::nullopt;
12728 if (Val != (Start + (Stride * i)))
12729 return std::nullopt;
12732 return std::make_pair(Start, Stride);
12748 for (
int Idx = Mask[i]; i != e; ++i)
12749 if (Mask[i] >= 0 && Mask[i] !=
Idx)
12757 if (isa<ConstantSDNode>(
N))
12758 return N.getNode();
12760 return N.getNode();
12768 isa<ConstantSDNode>(
N.getOperand(0)))
12769 return N.getNode();
12776 if (isa<ConstantFPSDNode>(
N))
12777 return N.getNode();
12780 return N.getNode();
12783 isa<ConstantFPSDNode>(
N.getOperand(0)))
12784 return N.getNode();
12790 assert(!Node->OperandList &&
"Node already has operands");
12792 "too many operands to fit into SDNode");
12793 SDUse *Ops = OperandRecycler.allocate(
12796 bool IsDivergent =
false;
12797 for (
unsigned I = 0;
I != Vals.
size(); ++
I) {
12798 Ops[
I].setUser(Node);
12799 Ops[
I].setInitial(Vals[
I]);
12800 if (Ops[
I].Val.getValueType() != MVT::Other)
12804 Node->OperandList = Ops;
12807 Node->SDNodeBits.IsDivergent = IsDivergent;
12815 while (Vals.
size() > Limit) {
12816 unsigned SliceIdx = Vals.
size() - Limit;
12890 const SDLoc &DLoc) {
12895 Entry.Ty =
Ptr.getValueType().getTypeForEVT(*
getContext());
12896 Args.push_back(Entry);
12908 assert(
From && To &&
"Invalid SDNode; empty source SDValue?");
12909 auto I = SDEI.find(
From);
12910 if (
I == SDEI.end())
12915 NodeExtraInfo NEI =
I->second;
12924 SDEI[To] = std::move(NEI);
12943 Leafs.emplace_back(
N);
12946 if (!FromReach.
insert(
N).second)
12954 auto DeepCopyTo = [&](
auto &&Self,
const SDNode *
N) {
12957 if (!Visited.
insert(
N).second)
12962 if (!Self(Self,
Op.getNode()))
12982 for (
const SDNode *
N : StartFrom)
12983 VisitFrom(VisitFrom,
N,
MaxDepth - PrevDepth);
12995 errs() <<
"warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
12996 assert(
false &&
"From subgraph too complex - increase max. MaxDepth?");
12998 SDEI[To] = std::move(NEI);
13012 if (!Visited.
insert(
N).second) {
13013 errs() <<
"Detected cycle in SelectionDAG\n";
13014 dbgs() <<
"Offending node:\n";
13015 N->dumprFull(DAG);
dbgs() <<
"\n";
13031 bool check = force;
13032#ifdef EXPENSIVE_CHECKS
13036 assert(
N &&
"Checking nonexistent SDNode");
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isConstant(const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
#define LLVM_LIKELY(EXPR)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is MaybeLiveUses might be modified but its content should be ignored(since it might not be complete). DeadArgumentEliminationPass
Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
Given that RA is a live value
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines a hash set that can be used to remove duplication of nodes in a graph.
Rewrite Partial Register Uses
static const unsigned MaxDepth
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
mir Rename Register Operands
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, AAResults *AA)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void VerifySDNode(SDNode *N)
VerifySDNode - Check the given SDNode. Aborts if it is invalid.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static Constant * ConstantFold(Instruction *I, const DataLayout &DL, const SmallDenseMap< Value *, Constant * > &ConstantPool)
Try to fold instruction I into a constant.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static unsigned getSize(unsigned Kind)
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
opStatus mod(const APFloat &RHS)
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt usub_sat(const APInt &RHS) const
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
APInt sadd_sat(const APInt &RHS) const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
void clearAllBits()
Set every bit to 0.
APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
APInt reverseBits() const
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt sshl_sat(const APInt &RHS) const
APInt ushl_sat(const APInt &RHS) const
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
unsigned logBase2() const
APInt uadd_sat(const APInt &RHS) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
void setAllBits()
Set every bit to 1.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
APInt ssub_sat(const APInt &RHS) const
An arbitrary precision integer that knows its signedness.
AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT, unsigned SrcAS, unsigned DestAS)
Recycle small arrays allocated from a BumpPtrAllocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
This is an SDNode representing atomic operations.
static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
void clear()
clear - Removes all bits from the bitvector.
bool none() const
none - Returns true if none of the bits are set.
size_type size() const
size - Returns the number of bits in this bitvector.
int64_t getOffset() const
unsigned getTargetFlags() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
std::optional< std::pair< APInt, APInt > > isConstantSequence() const
If this BuildVector is constant and represents the numerical series "<a, a+n, a+2n,...
SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
static bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
bool isMachineConstantPoolEntry() const
This class represents a range of values.
ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
KnownBits toKnownBits() const
Return known bits for values in this range.
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
Constant * getSplatValue(bool AllowUndefs=false) const
If all elements of the vector constant have the same value, return that value.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Implements a dense probed hash-table based set.
const char * getSymbol() const
unsigned getTargetFlags() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
MachineBasicBlock * MBB
MBB - The current block.
Data structure describing the variable locations in a function.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
int64_t getOffset() const
unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
constexpr bool isValid() const
This is an important class for using LLVM in a threaded context.
This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate the offset and size that ar...
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MVT getIntegerVT(unsigned BitWidth)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Abstract base class for all machine specific constantpool value subclasses.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
bool isNonTemporal() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
bool isDereferenceable() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, MachineMemOperand *MMO)
MachineMemOperand * MMO
Memory reference information.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Pass interface - Implemented by all 'passes'.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Keeps track of dbg_value information through SDISel.
BumpPtrAllocator & getAlloc()
void add(SDDbgValue *V, bool isParameter)
void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
ArrayRef< SDDbgValue * > getSDDbgValues(const SDNode *Node) const
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
MemSDNodeBitfields MemSDNodeBits
void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Return true if the type of the node is undefined.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUses uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
void DropOperands()
Release the operands and set this node to have zero operands.
Represents a use of a SDNode.
SDNode * getNode() const
Convenience function for get().getNode().
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo) const
Emit target-specific code that performs a memset.
virtual SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memmove.
virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memcpy.
SDNodeFlags getFlags() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
bool isKnownNeverSNaN(SDValue Op, unsigned Depth=0) const
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
const APInt * getValidMaximumShiftAmountConstant(SDValue V, const APInt &DemandedElts) const
If a SHL/SRA/SRL node V has constant shift amounts that are all less than the element bit-width of th...
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void updateDivergence(SDNode *N)
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl)
Constant fold a setcc to true or false.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
bool isADDLike(SDValue Op) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
bool calculateDivergence(SDNode *N)
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void VerifyDAGDivergence()
bool shouldOptForSize() const
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDNode * isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
const APInt * getValidShiftAmountConstant(SDValue V, const APInt &DemandedElts) const
If a SHL/SRA/SRL node V has a constant or splat constant shift amount that is less than the element b...
SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
bool isKnownNeverZeroFloat(SDValue Op) const
Test whether the given floating point SDValue is known to never be positive or negative zero.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
LLVMContext * getContext() const
SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
const APInt * getValidMinimumShiftAmountConstant(SDValue V, const APInt &DemandedElts) const
If a SHL/SRA/SRL node V has constant shift amounts that are all less than the element bit-width of th...
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static bool isSplatMask(const int *Mask, EVT VT)
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
int64_t getOffset() const
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns whether Op is known to never be a signaling NaN.
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
SmartMutex - A mutex with a compile time constant parameter that indicates whether this mutex should ...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
const APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
@ BasicBlock
Various leaf nodes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ LIFETIME_START
This corresponds to the llvm.lifetime.* intrinsics.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ SET_FPENV_MEM
Sets the current floating point environment.
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
unsigned getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantSDNode predicate.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool matchUnaryFpPredicate(SDValue Op, std::function< bool(ConstantFPSDNode *)> Match, bool AllowUndefs=false)
Hook for matching ConstantFPSDNode predicate.
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
std::lock_guard< SmartMutex< mt_only > > SmartScopedLock
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool operator<(int64_t V1, const APSInt &V2)
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
bool getAlign(const Function &F, unsigned index, unsigned &align)
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
@ Mul
Product of integers.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
DWARFExpression::Operation Op
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
MDNode * TBAA
The tag for type-based alias analysis.
static const fltSemantics & IEEEsingle() LLVM_READNONE
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardNegative
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
static const fltSemantics & IEEEquad() LLVM_READNONE
static const fltSemantics & IEEEdouble() LLVM_READNONE
static const fltSemantics & IEEEhalf() LLVM_READNONE
static constexpr roundingMode rmTowardPositive
static const fltSemantics & BFloat() LLVM_READNONE
opStatus
IEEE-754R 7: Default exception handling.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
intptr_t getRawBits() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
void makeNonNegative()
Make this value non-negative.
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
KnownBits byteSwap() const
bool hasConflict() const
Returns true if there is conflicting information.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
void setAllZero()
Make all bits known to be zero and discard any previous information.
KnownBits reverseBits() const
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
These are IR-level optimization flags that may be propagated to SDNodes.
void intersectWith(const SDNodeFlags Flags)
Clear any flags in this flag set that aren't also set in Flags.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
DAGUpdateListener *const Next
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)