35#define DEBUG_TYPE "legalize-types"
// Entry point for scalarizing the result of node N (result number ResNo):
// a one-element vector result is replaced by its single scalar element.
// Dispatches on N's opcode to the per-opcode ScalarizeVecRes_* helpers and
// records the result via SetScalarizedVector.
// NOTE(review): this chunk is truncated — the embedded original-line markers
// jump (41, 46, 49, 61, ...), so many case labels, braces, and statements are
// elided here; verify against upstream LLVM LegalizeVectorTypes.cpp.
41void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
46 switch (
N->getOpcode()) {
// Debug path: dump the node being scalarized (elided dump call follows).
49 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
61 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
63 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
69 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
70 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
// The following handlers are reached from case labels elided in this view.
76 R = ScalarizeVecRes_VecInregOp(
N);
125 R = ScalarizeVecRes_UnaryOp(
N);
128 R = ScalarizeVecRes_ADDRSPACECAST(
N);
131 R = ScalarizeVecRes_FFREXP(
N, ResNo);
180 R = ScalarizeVecRes_BinOp(
N);
185 R = ScalarizeVecRes_CMP(
N);
191 R = ScalarizeVecRes_TernaryOp(
N);
194#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
195 case ISD::STRICT_##DAGN:
196#include "llvm/IR/ConstrainedOps.def"
197 R = ScalarizeVecRes_StrictFPOp(
N);
202 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
211 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
221 R = ScalarizeVecRes_FIX(
N);
// Register the scalarized value R for result ResNo of N.
227 SetScalarizedVector(
SDValue(
N, ResNo), R);
231 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
232 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
234 LHS.getValueType(), LHS, RHS,
N->getFlags());
242 if (getTypeAction(
LHS.getValueType()) ==
244 LHS = GetScalarizedVector(LHS);
245 RHS = GetScalarizedVector(RHS);
247 EVT VT =
LHS.getValueType().getVectorElementType();
255 N->getValueType(0).getVectorElementType(), LHS, RHS);
259 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
260 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
261 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
267 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
268 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
// Scalarize an FFREXP node whose two vector results each have exactly one
// element. Builds a scalar node with both scalar result types, records the
// "other" result (1 - ResNo) appropriately depending on how it is legalized
// (elided branch), and returns the requested result of the scalar node.
// NOTE(review): interior statements are elided in this chunk (line markers
// jump 277 -> 279, 285 -> 289, ...); confirm against upstream LLVM.
274SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
275 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
276 "Unexpected vector type!");
277 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
279 EVT VT0 =
N->getValueType(0);
280 EVT VT1 =
N->getValueType(1);
// Scalar node produces both element types of the two vector results.
285 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
// Handle the result that was NOT requested (the sibling result).
289 unsigned OtherNo = 1 - ResNo;
290 EVT OtherVT =
N->getValueType(OtherNo);
292 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
296 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
299 return SDValue(ScalarNode, ResNo);
303 EVT VT =
N->getValueType(0).getVectorElementType();
304 unsigned NumOpers =
N->getNumOperands();
306 EVT ValueVTs[] = {VT, MVT::Other};
315 for (
unsigned i = 1; i < NumOpers; ++i) {
321 Oper = GetScalarizedVector(Oper);
332 Opers,
N->getFlags());
343 EVT ResVT =
N->getValueType(0);
344 EVT OvVT =
N->getValueType(1);
348 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
349 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
354 ScalarLHS = ElemsLHS[0];
355 ScalarRHS = ElemsRHS[0];
361 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
365 unsigned OtherNo = 1 - ResNo;
366 EVT OtherVT =
N->getValueType(OtherNo);
368 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
372 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
375 return SDValue(ScalarNode, ResNo);
380 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
381 return GetScalarizedVector(
Op);
386 if (
Op.getValueType().isVector()
387 &&
Op.getValueType().getVectorNumElements() == 1
388 && !isSimpleLegalType(
Op.getValueType()))
389 Op = GetScalarizedVector(
Op);
390 EVT NewVT =
N->getValueType(0).getVectorElementType();
// Scalarize a BUILD_VECTOR of one element: the result is (presumably) the
// first operand, possibly converted to the element type — body elided here.
395SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
396 EVT EltVT =
N->getValueType(0).getVectorElementType();
// Scalarize EXTRACT_SUBVECTOR: extracts the single element at the given
// index from the source vector (surrounding getNode call elided in chunk).
405SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
407 N->getValueType(0).getVectorElementType(),
408 N->getOperand(0),
N->getOperand(1));
414 EVT OpVT =
Op.getValueType();
418 Op = GetScalarizedVector(
Op);
425 N->getValueType(0).getVectorElementType(),
Op,
430 SDValue Op = GetScalarizedVector(
N->getOperand(0));
// Scalarize INSERT_VECTOR_ELT on a one-element vector: the inserted scalar
// IS the result; if its type differs from the element type it is converted
// (conversion code elided in this chunk).
435SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
439 EVT EltVT =
N->getValueType(0).getVectorElementType();
440 if (
Op.getValueType() != EltVT)
447 assert(
N->isUnindexed() &&
"Indexed vector load?");
451 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
452 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
453 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
454 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
464 EVT DestVT =
N->getValueType(0).getVectorElementType();
466 EVT OpVT =
Op.getValueType();
476 Op = GetScalarizedVector(
Op);
486 EVT EltVT =
N->getValueType(0).getVectorElementType();
488 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
497 EVT OpVT =
Op.getValueType();
499 EVT EltVT =
N->getValueType(0).getVectorElementType();
502 Op = GetScalarizedVector(
Op);
508 switch (
N->getOpcode()) {
// Scalarize ADDRSPACECAST: scalarize the pointer operand (when it is itself
// a one-element vector) and emit a scalar addrspacecast preserving the
// source/destination address spaces. Final getAddrSpaceCast call elided.
520SDValue DAGTypeLegalizer::ScalarizeVecRes_ADDRSPACECAST(
SDNode *
N) {
521 EVT DestVT =
N->getValueType(0).getVectorElementType();
523 EVT OpVT =
Op.getValueType();
// (Elided: branch deciding between GetScalarizedVector and an extract.)
533 Op = GetScalarizedVector(
Op);
539 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
540 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
541 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
545SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
548 EVT EltVT =
N->getValueType(0).getVectorElementType();
557 EVT OpVT =
Cond.getValueType();
570 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
584 EVT OpVT =
Cond->getOperand(0).getValueType();
591 EVT CondVT =
Cond.getValueType();
592 if (ScalarBool != VecBool) {
593 switch (ScalarBool) {
614 auto BoolVT = getSetCCResultType(CondVT);
615 if (BoolVT.bitsLT(CondVT))
620 GetScalarizedVector(
N->getOperand(2)));
624 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
626 LHS.getValueType(),
N->getOperand(0), LHS,
627 GetScalarizedVector(
N->getOperand(2)));
631 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
633 N->getOperand(0),
N->getOperand(1),
634 LHS, GetScalarizedVector(
N->getOperand(3)),
639 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
// Scalarize a one-element VECTOR_SHUFFLE: the shuffle mask's single entry
// selects which input (operand 0 or 1) supplies the scalar. An undef mask
// yields undef. NOTE(review): the `if (Arg.isUndef())` guard for the undef
// return appears elided in this chunk (line 645 missing) — verify upstream.
642SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
644 SDValue Arg =
N->getOperand(2).getOperand(0);
646 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
// Non-zero mask element selects operand 1, zero selects operand 0.
647 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
648 return GetScalarizedVector(
N->getOperand(
Op));
// Scalarize FP_TO_SINT_SAT / FP_TO_UINT_SAT: scalarize the FP source and
// rebuild the saturating conversion on the scalar element type, keeping the
// saturation-width operand (operand 1) unchanged.
651SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
653 EVT SrcVT = Src.getValueType();
658 Src = GetScalarizedVector(Src);
664 EVT DstVT =
N->getValueType(0).getVectorElementType();
665 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
669 assert(
N->getValueType(0).isVector() &&
670 N->getOperand(0).getValueType().isVector() &&
671 "Operand types must be vectors");
674 EVT OpVT =
LHS.getValueType();
675 EVT NVT =
N->getValueType(0).getVectorElementType();
680 LHS = GetScalarizedVector(LHS);
681 RHS = GetScalarizedVector(RHS);
697 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
705 EVT ResultVT =
N->getValueType(0).getVectorElementType();
708 Arg = GetScalarizedVector(Arg);
721 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
// Entry point for scalarizing operand OpNo of node N when that operand is a
// one-element vector. Dispatches to per-opcode ScalarizeVecOp_* helpers;
// returns false when the node was handled by replacing its value in place.
// NOTE(review): case labels and surrounding statements are elided in this
// chunk (markers jump 736 -> 743 -> 755 ...); verify against upstream LLVM.
728bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
733 switch (
N->getOpcode()) {
736 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
743 Res = ScalarizeVecOp_BITCAST(
N);
755 Res = ScalarizeVecOp_UnaryOp(
N);
761 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
764 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
767 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
770 Res = ScalarizeVecOp_VSELECT(
N);
773 Res = ScalarizeVecOp_VSETCC(
N);
776 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
779 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
782 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
785 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
788 Res = ScalarizeVecOp_FP_EXTEND(
N);
805 Res = ScalarizeVecOp_VECREDUCE(
N);
809 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
813 Res = ScalarizeVecOp_CMP(
N);
// A null Res means the helper already performed the replacement itself.
818 if (!Res.
getNode())
return false;
826 "Invalid operand expansion");
828 ReplaceValueWith(
SDValue(
N, 0), Res);
835 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
837 N->getValueType(0), Elt);
843 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
844 "Unexpected vector type!");
845 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
847 N->getValueType(0).getScalarType(), Elt);
// Scalarize the vector operand (operand 1; operand 0 is the chain) of a
// strict-FP unary op producing a one-element vector; rebuilds the node with
// {scalar type, MVT::Other} results and replaces N's value with Res.
855SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
856 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
857 "Unexpected vector type!");
858 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
860 {
N->getValueType(0).getScalarType(), MVT::Other },
861 {
N->getOperand(0), Elt });
871 ReplaceValueWith(
SDValue(
N, 0), Res);
// Scalarize CONCAT_VECTORS of one-element vectors: gather each operand's
// scalar into Ops (BUILD_VECTOR construction elided in this chunk).
876SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
878 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
879 Ops[i] = GetScalarizedVector(
N->getOperand(i));
// Scalarize EXTRACT_VECTOR_ELT from a one-element vector: the result is the
// scalarized source itself (type fix-up, if any, elided in this chunk).
885SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
886 EVT VT =
N->getValueType(0);
887 SDValue Res = GetScalarizedVector(
N->getOperand(0));
899 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
900 EVT VT =
N->getValueType(0);
910 assert(
N->getValueType(0).isVector() &&
911 N->getOperand(0).getValueType().isVector() &&
912 "Operand types must be vectors");
913 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
915 EVT VT =
N->getValueType(0);
916 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
917 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
919 EVT OpVT =
N->getOperand(0).getValueType();
931 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
939 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
940 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
943 if (
N->isTruncatingStore())
945 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
946 N->getBasePtr(),
N->getPointerInfo(),
947 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
948 N->getMemOperand()->getFlags(),
N->getAAInfo());
950 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
951 N->getBasePtr(),
N->getPointerInfo(),
952 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
// Scalarize the source (operand 0) of an FP_ROUND producing a one-element
// vector; trailing getNode call with the rounding-mode operand is elided.
958SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
959 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
960 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
962 N->getValueType(0).getVectorElementType(), Elt,
// Strict-FP variant of FP_ROUND scalarization: operand 0 is the chain, so
// the vector payload is operand 1; rebuilds the node with the chain and the
// rounding-mode operand (operand 2), then replaces N's value with Res.
967SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
969 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
970 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
974 {
N->getOperand(0), Elt,
N->getOperand(2) });
983 ReplaceValueWith(
SDValue(
N, 0), Res);
990 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
992 N->getValueType(0).getVectorElementType(), Elt);
// Strict-FP FP_EXTEND scalarization: vector payload is operand 1 (operand 0
// is the chain); rebuilt node's value replaces N. Interior getNode elided.
998SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
999 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
1003 {
N->getOperand(0), Elt});
1012 ReplaceValueWith(
SDValue(
N, 0), Res);
1017 SDValue Res = GetScalarizedVector(
N->getOperand(0));
// Scalarize a sequential vector reduction over a one-element vector: the
// reduction collapses to a single scalar op combining the accumulator with
// the lone element, propagating N's fast-math flags.
1024SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
1030 SDValue Op = GetScalarizedVector(VecOp);
1032 AccOp,
Op,
N->getFlags());
1036 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
1037 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
1039 EVT ResVT =
N->getValueType(0).getVectorElementType();
1052void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
1057 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
1060 switch (
N->getOpcode()) {
1063 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
1075 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1088 case ISD::EXPERIMENTAL_VP_SPLAT: SplitVecRes_VP_SPLAT(
N,
Lo,
Hi);
break;
1091 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1094 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1098 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1101 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1103 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1104 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1107 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1110 case ISD::VP_GATHER:
1111 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1114 SplitVecRes_VECTOR_COMPRESS(
N,
Lo,
Hi);
1118 SplitVecRes_SETCC(
N,
Lo,
Hi);
1121 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1124 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1127 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1130 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1133 SplitVecRes_VECTOR_INTERLEAVE(
N);
1136 SplitVecRes_VAARG(
N,
Lo,
Hi);
1142 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1148 case ISD::VP_BITREVERSE:
1156 case ISD::VP_CTLZ_ZERO_UNDEF:
1158 case ISD::VP_CTTZ_ZERO_UNDEF:
1173 case ISD::VP_FFLOOR:
1178 case ISD::VP_FNEARBYINT:
1183 case ISD::VP_FP_EXTEND:
1185 case ISD::VP_FP_ROUND:
1187 case ISD::VP_FP_TO_SINT:
1189 case ISD::VP_FP_TO_UINT:
1195 case ISD::VP_LLRINT:
1197 case ISD::VP_FROUND:
1199 case ISD::VP_FROUNDEVEN:
1206 case ISD::VP_FROUNDTOZERO:
1208 case ISD::VP_SINT_TO_FP:
1210 case ISD::VP_TRUNCATE:
1212 case ISD::VP_UINT_TO_FP:
1214 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1217 SplitVecRes_ADDRSPACECAST(
N,
Lo,
Hi);
1220 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1226 case ISD::VP_SIGN_EXTEND:
1227 case ISD::VP_ZERO_EXTEND:
1228 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1245 case ISD::VP_FMINNUM:
1248 case ISD::VP_FMAXNUM:
1250 case ISD::VP_FMINIMUM:
1252 case ISD::VP_FMAXIMUM:
1258 case ISD::OR:
case ISD::VP_OR:
1278 case ISD::VP_FCOPYSIGN:
1279 SplitVecRes_BinOp(
N,
Lo,
Hi);
1286 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1290 SplitVecRes_CMP(
N,
Lo,
Hi);
1293#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1294 case ISD::STRICT_##DAGN:
1295#include "llvm/IR/ConstrainedOps.def"
1296 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1301 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1310 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1320 SplitVecRes_FIX(
N,
Lo,
Hi);
1322 case ISD::EXPERIMENTAL_VP_REVERSE:
1323 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
// Advance the memory pointer of N past the low half (MemVT) when splitting a
// load/store: adds IncrementSize to the pointer (no-unsigned-wrap), tracks
// the scaled offset when requested, and updates MPI to point at the offset
// MachinePointerInfo. NOTE(review): surrounding statements elided in chunk.
1332void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1341 DL,
Ptr.getValueType(),
1342 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
// The increment cannot wrap: the high half lives directly after the low.
1344 Flags.setNoUnsignedWrap(
true);
1346 *ScaledOffset += IncrementSize;
1350 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
// Split a vector mask into lo/hi halves. The one-argument overload derives
// the debug location from the mask itself and forwards to the two-argument
// overload below.
1356std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1357 return SplitMask(Mask,
SDLoc(Mask));
// Two-argument overload: uses the cached split when the mask's type is
// already being split, otherwise (elided else-branch) splits it directly.
1360std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1363 EVT MaskVT =
Mask.getValueType();
1365 GetSplitVector(Mask, MaskLo, MaskHi);
1368 return std::make_pair(MaskLo, MaskHi);
1373 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1375 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1379 unsigned Opcode =
N->getOpcode();
1380 if (
N->getNumOperands() == 2) {
1386 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1387 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1390 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1393 std::tie(EVLLo, EVLHi) =
1394 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1397 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1399 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1405 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1407 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1409 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1413 unsigned Opcode =
N->getOpcode();
1414 if (
N->getNumOperands() == 3) {
1420 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1421 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1424 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1427 std::tie(EVLLo, EVLHi) =
1428 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1431 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1433 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1443 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
1445 GetSplitVector(LHS, LHSLo, LHSHi);
1446 GetSplitVector(RHS, RHSLo, RHSHi);
1448 std::tie(LHSLo, LHSHi) = DAG.
SplitVector(LHS, dl);
1449 std::tie(RHSLo, RHSHi) = DAG.
SplitVector(RHS, dl);
1452 EVT SplitResVT =
N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
1453 Lo = DAG.
getNode(
N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
1454 Hi = DAG.
getNode(
N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);
1459 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1461 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1465 unsigned Opcode =
N->getOpcode();
1484 switch (getTypeAction(InVT)) {
1499 GetExpandedOp(InOp,
Lo,
Hi);
1510 GetSplitVector(InOp,
Lo,
Hi);
1531 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1554 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1556 unsigned NumSubvectors =
N->getNumOperands() / 2;
1557 if (NumSubvectors == 1) {
1558 Lo =
N->getOperand(0);
1559 Hi =
N->getOperand(1);
1573void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1595 GetSplitVector(Vec,
Lo,
Hi);
1598 EVT LoVT =
Lo.getValueType();
1607 unsigned IdxVal =
Idx->getAsZExtVal();
1608 if (IdxVal + SubElems <= LoElems) {
1616 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1642 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1646 auto *
Load = cast<LoadSDNode>(
Lo);
1648 IncrementPointer(Load, LoVT, MPI, StackPtr);
1651 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1660 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1665 EVT RHSVT =
RHS.getValueType();
1668 GetSplitVector(RHS, RHSLo, RHSHi);
1685 SDValue FpValue =
N->getOperand(0);
1687 GetSplitVector(FpValue, ArgLo, ArgHi);
1700 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1704 std::tie(LoVT, HiVT) =
1715 unsigned Opcode =
N->getOpcode();
1722 GetSplitVector(N0, InLo, InHi);
1729 EVT OutLoVT, OutHiVT;
1732 assert((2 * OutNumElements) <= InNumElements &&
1733 "Illegal extend vector in reg split");
1743 for (
unsigned i = 0; i != OutNumElements; ++i)
1744 SplitHi[i] = i + OutNumElements;
1747 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1748 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1753 unsigned NumOps =
N->getNumOperands();
1767 for (
unsigned i = 1; i < NumOps; ++i) {
1772 EVT InVT =
Op.getValueType();
1777 GetSplitVector(
Op, OpLo, OpHi);
1786 EVT LoValueVTs[] = {LoVT, MVT::Other};
1787 EVT HiValueVTs[] = {HiVT, MVT::Other};
1796 Lo.getValue(1),
Hi.getValue(1));
1800 ReplaceValueWith(
SDValue(
N, 1), Chain);
1803SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1805 EVT VT =
N->getValueType(0);
1816 else if (NE > ResNE)
1820 EVT ChainVTs[] = {EltVT, MVT::Other};
1824 for (i = 0; i !=
NE; ++i) {
1826 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1827 SDValue Operand =
N->getOperand(j);
1838 Scalar.getNode()->setFlags(
N->getFlags());
1846 for (; i < ResNE; ++i)
1851 ReplaceValueWith(
SDValue(
N, 1), Chain);
1858void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1861 EVT ResVT =
N->getValueType(0);
1862 EVT OvVT =
N->getValueType(1);
1863 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1867 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1869 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1870 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1876 unsigned Opcode =
N->getOpcode();
1888 unsigned OtherNo = 1 - ResNo;
1889 EVT OtherVT =
N->getValueType(OtherNo);
1891 SetSplitVector(
SDValue(
N, OtherNo),
1897 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1901void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1907 GetSplitVector(Vec,
Lo,
Hi);
1910 unsigned IdxVal = CIdx->getZExtValue();
1911 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1912 if (IdxVal < LoNumElts) {
1914 Lo.getValueType(),
Lo, Elt,
Idx);
1960 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1963 auto Load = cast<LoadSDNode>(
Lo);
1965 IncrementPointer(Load, LoVT, MPI, StackPtr);
1967 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1971 if (LoVT !=
Lo.getValueType())
1973 if (HiVT !=
Hi.getValueType())
1981 assert(
N->getValueType(0).isScalableVector() &&
1982 "Only scalable vectors are supported for STEP_VECTOR");
2005 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
2018 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
2019 auto [EVLLo, EVLHi] = DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2020 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0), MaskLo, EVLLo);
2021 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT,
N->getOperand(0), MaskHi, EVLHi);
2035 EVT MemoryVT =
LD->getMemoryVT();
2039 EVT LoMemVT, HiMemVT;
2046 ReplaceValueWith(
SDValue(LD, 1), NewChain);
2051 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
2055 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
2058 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
2067 ReplaceValueWith(
SDValue(LD, 1), Ch);
2072 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
2081 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
2082 Align Alignment =
LD->getOriginalAlign();
2085 EVT MemoryVT =
LD->getMemoryVT();
2087 EVT LoMemVT, HiMemVT;
2088 bool HiIsEmpty =
false;
2089 std::tie(LoMemVT, HiMemVT) =
2095 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2098 GetSplitVector(Mask, MaskLo, MaskHi);
2100 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2105 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
2114 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
2123 LD->isExpandingLoad());
2129 MPI =
LD->getPointerInfo().getWithOffset(
2134 Alignment,
LD->getAAInfo(),
LD->getRanges());
2137 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2138 LD->isExpandingLoad());
2148 ReplaceValueWith(
SDValue(LD, 1), Ch);
2154 "Indexed VP strided load during type legalization!");
2156 "Unexpected indexed variable-length load offset");
2163 EVT LoMemVT, HiMemVT;
2164 bool HiIsEmpty =
false;
2165 std::tie(LoMemVT, HiMemVT) =
2171 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2174 GetSplitVector(Mask, LoMask, HiMask);
2180 std::tie(LoEVL, HiEVL) =
2218 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2229 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2242 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2251 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2254 GetSplitVector(Mask, MaskLo, MaskHi);
2256 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2260 EVT LoMemVT, HiMemVT;
2261 bool HiIsEmpty =
false;
2262 std::tie(LoMemVT, HiMemVT) =
2265 SDValue PassThruLo, PassThruHi;
2267 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2269 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2312 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2329 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2330 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2332 auto *VPSC = cast<VPGatherSDNode>(
N);
2333 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2336 EVT MemoryVT =
N->getMemoryVT();
2337 Align Alignment =
N->getOriginalAlign();
2341 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2342 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2344 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2347 EVT LoMemVT, HiMemVT;
2352 if (getTypeAction(Ops.Index.getValueType()) ==
2354 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2356 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2363 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2364 SDValue PassThru = MGT->getPassThru();
2365 SDValue PassThruLo, PassThruHi;
2368 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2370 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2375 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2377 OpsLo, MMO, IndexTy, ExtType);
2379 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2381 OpsHi, MMO, IndexTy, ExtType);
2383 auto *VPGT = cast<VPGatherSDNode>(
N);
2385 std::tie(EVLLo, EVLHi) =
2386 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2388 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2390 MMO, VPGT->getIndexType());
2392 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2394 MMO, VPGT->getIndexType());
2404 ReplaceValueWith(
SDValue(
N, 1), Ch);
2419 assert(
N->getValueType(0).isVector() &&
2420 N->getOperand(0).getValueType().isVector() &&
2421 "Operand types must be vectors");
2429 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2431 GetSplitVector(
N->getOperand(0), LL, LH);
2435 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2437 GetSplitVector(
N->getOperand(1), RL, RH);
2442 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2443 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2445 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2446 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2447 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2448 std::tie(EVLLo, EVLHi) =
2449 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2450 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2452 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2466 EVT InVT =
N->getOperand(0).getValueType();
2468 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2473 unsigned Opcode =
N->getOpcode();
2474 if (
N->getNumOperands() <= 2) {
2476 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2477 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2485 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2486 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2489 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2492 std::tie(EVLLo, EVLHi) =
2493 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2506 EVT InVT =
N->getOperand(0).getValueType();
2508 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2512 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
2513 unsigned SrcAS = AddrSpaceCastN->getSrcAddressSpace();
2514 unsigned DestAS = AddrSpaceCastN->getDestAddressSpace();
2519void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2527 EVT InVT =
N->getOperand(0).getValueType();
2529 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2533 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2534 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2535 Lo->setFlags(
N->getFlags());
2536 Hi->setFlags(
N->getFlags());
2542 unsigned OtherNo = 1 - ResNo;
2543 EVT OtherVT =
N->getValueType(OtherNo);
2551 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2558 EVT SrcVT =
N->getOperand(0).getValueType();
2559 EVT DestVT =
N->getValueType(0);
2582 EVT SplitLoVT, SplitHiVT;
2586 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2587 N->dump(&DAG);
dbgs() <<
"\n");
2588 if (!
N->isVPOpcode()) {
2591 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2602 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2603 N->getOperand(1),
N->getOperand(2));
2608 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2611 std::tie(EVLLo, EVLHi) =
2612 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2614 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2615 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2620 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2628 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2629 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2635 return N.getResNo() == 0 &&
2639 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2644 "Expected build vector node.");
2647 for (
unsigned I = 0;
I < NewElts; ++
I) {
2652 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2654 Ops[
I] = Input1.getOperand(
Idx);
2656 if (Ops[
I].getValueType().bitsGT(EltVT))
2659 return DAG.getBuildVector(NewVT,
DL, Ops);
2667 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2671 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2673 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2682 for (
auto &
P : ShufflesIdxs) {
2683 if (
P.second.size() < 2)
2687 for (
int &
Idx : Mask) {
2690 unsigned SrcRegIdx =
Idx / NewElts;
2691 if (Inputs[SrcRegIdx].
isUndef()) {
2696 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].
getNode());
2699 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2704 Idx = MaskElt % NewElts +
2705 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2711 Inputs[
P.second[0]] =
P.first.first;
2712 Inputs[
P.second[1]] =
P.first.second;
2715 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2719 for (
int &
Idx : Mask) {
2722 unsigned SrcRegIdx =
Idx / NewElts;
2723 if (Inputs[SrcRegIdx].
isUndef()) {
2728 getTypeAction(Inputs[SrcRegIdx].getValueType());
2730 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2731 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2734 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2736 if (UsedSubVector.count() > 1) {
2738 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2739 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2741 if (Pairs.
empty() || Pairs.
back().size() == 2)
2743 if (UsedSubVector.test(2 *
I)) {
2744 Pairs.
back().emplace_back(
I, 0);
2746 assert(UsedSubVector.test(2 *
I + 1) &&
2747 "Expected to be used one of the subvectors.");
2748 Pairs.
back().emplace_back(
I, 1);
2751 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2753 for (
int &
Idx : Mask) {
2756 unsigned SrcRegIdx =
Idx / NewElts;
2758 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2759 return Idxs.front().first == SrcRegIdx ||
2760 Idxs.back().first == SrcRegIdx;
2762 if (It == Pairs.
end())
2764 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2765 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2768 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2769 Inputs[Idxs.front().first] = DAG.
getNode(
2771 Inputs[Idxs.front().first].getValueType(),
2772 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2773 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2782 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2783 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].
getNode());
2786 if (Shuffle->getOperand(0).getValueType() != NewVT)
2789 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2790 !Shuffle->isSplat()) {
2792 }
else if (!Inputs[
I].hasOneUse() &&
2793 !Shuffle->getOperand(1).isUndef()) {
2795 for (
int &
Idx : Mask) {
2798 unsigned SrcRegIdx =
Idx / NewElts;
2801 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2806 int OpIdx = MaskElt / NewElts;
2819 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2820 if (Shuffle->getOperand(OpIdx).isUndef())
2822 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2823 if (It == std::end(Inputs))
2825 int FoundOp = std::distance(std::begin(Inputs), It);
2828 for (
int &
Idx : Mask) {
2831 unsigned SrcRegIdx =
Idx / NewElts;
2834 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2839 int MaskIdx = MaskElt / NewElts;
2840 if (OpIdx == MaskIdx)
2841 Idx = MaskElt % NewElts + FoundOp * NewElts;
2844 Op = (OpIdx + 1) % 2;
2852 for (
int &
Idx : Mask) {
2855 unsigned SrcRegIdx =
Idx / NewElts;
2858 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2859 int OpIdx = MaskElt / NewElts;
2862 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2868 TryPeekThroughShufflesInputs(OrigMask);
2870 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2874 for (
const auto &
I : Inputs) {
2876 UniqueConstantInputs.
insert(
I);
2877 else if (!
I.isUndef())
2882 if (UniqueInputs.
size() != std::size(Inputs)) {
2883 auto &&UniqueVec = UniqueInputs.
takeVector();
2884 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2885 unsigned ConstNum = UniqueConstantVec.size();
2886 for (
int &
Idx : Mask) {
2889 unsigned SrcRegIdx =
Idx / NewElts;
2890 if (Inputs[SrcRegIdx].
isUndef()) {
2894 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2895 if (It != UniqueConstantVec.end()) {
2897 NewElts * std::distance(UniqueConstantVec.begin(), It);
2898 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2901 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2902 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2904 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2905 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2907 copy(UniqueConstantVec, std::begin(Inputs));
2908 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2911 MakeUniqueInputs(OrigMask);
2913 copy(Inputs, std::begin(OrigInputs));
2919 unsigned FirstMaskIdx =
High * NewElts;
2922 assert(!Output &&
"Expected default initialized initial value.");
2923 TryPeekThroughShufflesInputs(Mask);
2924 MakeUniqueInputs(Mask);
2926 copy(Inputs, std::begin(TmpInputs));
2929 bool SecondIteration =
false;
2930 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2935 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2936 SecondIteration =
true;
2937 return SecondIteration;
2940 Mask, std::size(Inputs), std::size(Inputs),
2942 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2943 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2946 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2948 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2949 DAG.getUNDEF(NewVT), Mask);
2950 Inputs[
Idx] = Output;
2952 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2955 if (AccumulateResults(Idx1)) {
2958 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2960 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2961 Inputs[Idx2], Mask);
2965 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2967 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2968 TmpInputs[Idx2], Mask);
2970 Inputs[Idx1] = Output;
2972 copy(OrigInputs, std::begin(Inputs));
2977 EVT OVT =
N->getValueType(0);
2984 const Align Alignment =
2985 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2987 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2988 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2989 Chain =
Hi.getValue(1);
2993 ReplaceValueWith(
SDValue(
N, 1), Chain);
2998 EVT DstVTLo, DstVTHi;
2999 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
3003 EVT SrcVT =
N->getOperand(0).getValueType();
3005 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
3007 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
3009 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
3010 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
3016 GetSplitVector(
N->getOperand(0), InLo, InHi);
3028 std::tie(
Lo,
Hi) = DAG.SplitVector(Expanded,
DL);
3033 EVT VT =
N->getValueType(0);
3040 Align Alignment = DAG.getReducedAlign(VT,
false);
3046 auto &MF = DAG.getMachineFunction();
3060 DAG.getConstant(1,
DL, PtrVT));
3062 DAG.getConstant(EltWidth,
DL, PtrVT));
3064 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
3066 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
3067 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
3068 DAG.getUNDEF(PtrVT), Stride, TrueMask,
3071 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
3073 std::tie(
Lo,
Hi) = DAG.SplitVector(Load,
DL);
3076void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
3078 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
3079 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
3080 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
3084 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
3086 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
3092void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
3093 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
3094 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
3095 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
3099 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
3101 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
3103 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
3104 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
3115bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
3120 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
3123 switch (
N->getOpcode()) {
3126 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
3135 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
3141 case ISD::VP_TRUNCATE:
3143 Res = SplitVecOp_TruncateHelper(
N);
3146 case ISD::VP_FP_ROUND:
3150 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3153 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3155 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3156 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3159 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3162 case ISD::VP_SCATTER:
3163 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3166 case ISD::VP_GATHER:
3167 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3170 Res = SplitVecOp_VSELECT(
N, OpNo);
3176 case ISD::VP_SINT_TO_FP:
3177 case ISD::VP_UINT_TO_FP:
3178 if (
N->getValueType(0).bitsLT(
3179 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3180 Res = SplitVecOp_TruncateHelper(
N);
3182 Res = SplitVecOp_UnaryOp(
N);
3186 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3190 case ISD::VP_FP_TO_SINT:
3191 case ISD::VP_FP_TO_UINT:
3202 Res = SplitVecOp_UnaryOp(
N);
3205 Res = SplitVecOp_FPOpDifferentTypes(
N);
3210 Res = SplitVecOp_CMP(
N);
3216 Res = SplitVecOp_ExtVecInRegOp(
N);
3234 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3238 Res = SplitVecOp_VECREDUCE_SEQ(
N);
3240 case ISD::VP_REDUCE_FADD:
3241 case ISD::VP_REDUCE_SEQ_FADD:
3242 case ISD::VP_REDUCE_FMUL:
3243 case ISD::VP_REDUCE_SEQ_FMUL:
3244 case ISD::VP_REDUCE_ADD:
3245 case ISD::VP_REDUCE_MUL:
3246 case ISD::VP_REDUCE_AND:
3247 case ISD::VP_REDUCE_OR:
3248 case ISD::VP_REDUCE_XOR:
3249 case ISD::VP_REDUCE_SMAX:
3250 case ISD::VP_REDUCE_SMIN:
3251 case ISD::VP_REDUCE_UMAX:
3252 case ISD::VP_REDUCE_UMIN:
3253 case ISD::VP_REDUCE_FMAX:
3254 case ISD::VP_REDUCE_FMIN:
3255 case ISD::VP_REDUCE_FMAXIMUM:
3256 case ISD::VP_REDUCE_FMINIMUM:
3257 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
3259 case ISD::VP_CTTZ_ELTS:
3260 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
3261 Res = SplitVecOp_VP_CttzElements(
N);
3266 if (!Res.
getNode())
return false;
3273 if (
N->isStrictFPOpcode())
3275 "Invalid operand expansion");
3278 "Invalid operand expansion");
3280 ReplaceValueWith(
SDValue(
N, 0), Res);
3284SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
3287 assert(OpNo == 0 &&
"Illegal operand must be mask");
3294 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
3297 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3298 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3299 "Lo and Hi have differing types");
3302 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3303 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
3305 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3306 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3307 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3308 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3318SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3319 EVT ResVT =
N->getValueType(0);
3323 SDValue VecOp =
N->getOperand(OpNo);
3325 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3326 GetSplitVector(VecOp,
Lo,
Hi);
3328 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3334 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3338 EVT ResVT =
N->getValueType(0);
3347 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3348 GetSplitVector(VecOp,
Lo,
Hi);
3350 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3356 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
3359SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3360 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3361 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3363 unsigned Opc =
N->getOpcode();
3364 EVT ResVT =
N->getValueType(0);
3368 SDValue VecOp =
N->getOperand(OpNo);
3370 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3371 GetSplitVector(VecOp,
Lo,
Hi);
3374 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3377 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
3382 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
3383 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3388 EVT ResVT =
N->getValueType(0);
3391 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3392 EVT InVT =
Lo.getValueType();
3397 if (
N->isStrictFPOpcode()) {
3398 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3399 { N->getOperand(0), Lo });
3400 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3401 { N->getOperand(0), Hi });
3410 ReplaceValueWith(
SDValue(
N, 1), Ch);
3411 }
else if (
N->getNumOperands() == 3) {
3412 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3413 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3414 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3415 std::tie(EVLLo, EVLHi) =
3416 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3417 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3418 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3420 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3421 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3431 EVT ResVT =
N->getValueType(0);
3433 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3437 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3443 Lo = BitConvertToInteger(
Lo);
3444 Hi = BitConvertToInteger(
Hi);
3446 if (DAG.getDataLayout().isBigEndian())
3454 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3456 EVT ResVT =
N->getValueType(0);
3464 GetSplitVector(SubVec,
Lo,
Hi);
3467 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3473 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3475 return SecondInsertion;
3478SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3480 EVT SubVT =
N->getValueType(0);
3485 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3487 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
3490 if (IdxVal < LoEltsMin) {
3492 "Extracted subvector crosses vector split!");
3495 N->getOperand(0).getValueType().isScalableVector())
3497 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3502 "Extracting scalable subvector from fixed-width unsupported");
3510 "subvector from a scalable predicate vector");
3516 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3518 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3519 auto &MF = DAG.getMachineFunction();
3523 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3530 SubVT, dl, Store, StackPtr,
3534SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3543 GetSplitVector(Vec,
Lo,
Hi);
3545 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3547 if (IdxVal < LoElts)
3551 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3552 Idx.getValueType())), 0);
3556 if (CustomLowerNode(
N,
N->getValueType(0),
true))
3568 return DAG.getAnyExtOrTrunc(NewExtract, dl,
N->getValueType(0));
3574 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3576 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3577 auto &MF = DAG.getMachineFunction();
3580 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3588 assert(
N->getValueType(0).bitsGE(EltVT) &&
"Illegal EXTRACT_VECTOR_ELT.");
3590 return DAG.getExtLoad(
3601 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3609 SplitVecRes_Gather(
N,
Lo,
Hi);
3612 ReplaceValueWith(
SDValue(
N, 0), Res);
3617 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3621 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3623 SDValue EVL =
N->getVectorLength();
3625 Align Alignment =
N->getOriginalAlign();
3631 GetSplitVector(
Data, DataLo, DataHi);
3633 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3638 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3641 GetSplitVector(Mask, MaskLo, MaskHi);
3643 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3646 EVT MemoryVT =
N->getMemoryVT();
3647 EVT LoMemVT, HiMemVT;
3648 bool HiIsEmpty =
false;
3649 std::tie(LoMemVT, HiMemVT) =
3650 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3654 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3662 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3663 N->getAddressingMode(),
N->isTruncatingStore(),
3664 N->isCompressingStore());
3671 N->isCompressingStore());
3679 MPI =
N->getPointerInfo().getWithOffset(
3682 MMO = DAG.getMachineFunction().getMachineMemOperand(
3684 Alignment,
N->getAAInfo(),
N->getRanges());
3686 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3687 N->getAddressingMode(),
N->isTruncatingStore(),
3688 N->isCompressingStore());
3697 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3698 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3705 GetSplitVector(
Data, LoData, HiData);
3707 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3709 EVT LoMemVT, HiMemVT;
3710 bool HiIsEmpty =
false;
3711 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3717 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3718 else if (getTypeAction(
Mask.getValueType()) ==
3720 GetSplitVector(Mask, LoMask, HiMask);
3722 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3725 std::tie(LoEVL, HiEVL) =
3726 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3730 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3731 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3732 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3743 EVT PtrVT =
N->getBasePtr().getValueType();
3746 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3749 Align Alignment =
N->getOriginalAlign();
3757 Alignment,
N->getAAInfo(),
N->getRanges());
3760 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3761 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3762 N->isCompressingStore());
3771 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3775 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3778 Align Alignment =
N->getOriginalAlign();
3784 GetSplitVector(
Data, DataLo, DataHi);
3786 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3791 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3794 GetSplitVector(Mask, MaskLo, MaskHi);
3796 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3799 EVT MemoryVT =
N->getMemoryVT();
3800 EVT LoMemVT, HiMemVT;
3801 bool HiIsEmpty =
false;
3802 std::tie(LoMemVT, HiMemVT) =
3803 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3811 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3812 N->getAddressingMode(),
N->isTruncatingStore(),
3813 N->isCompressingStore());
3822 N->isCompressingStore());
3830 MPI =
N->getPointerInfo().getWithOffset(
3833 MMO = DAG.getMachineFunction().getMachineMemOperand(
3835 Alignment,
N->getAAInfo(),
N->getRanges());
3837 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3838 N->getAddressingMode(),
N->isTruncatingStore(),
3839 N->isCompressingStore());
3852 EVT MemoryVT =
N->getMemoryVT();
3853 Align Alignment =
N->getOriginalAlign();
3861 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3862 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3865 auto *VPSC = cast<VPScatterSDNode>(
N);
3866 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3871 EVT LoMemVT, HiMemVT;
3872 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3877 GetSplitVector(Ops.Data, DataLo, DataHi);
3879 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3883 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3884 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3886 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3890 if (getTypeAction(Ops.Index.getValueType()) ==
3892 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3894 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3902 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3903 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3905 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3906 MSC->getIndexType(), MSC->isTruncatingStore());
3911 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3912 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3913 MMO, MSC->getIndexType(),
3914 MSC->isTruncatingStore());
3916 auto *VPSC = cast<VPScatterSDNode>(
N);
3918 std::tie(EVLLo, EVLHi) =
3919 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3921 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3922 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3923 VPSC->getIndexType());
3928 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3929 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3930 VPSC->getIndexType());
3934 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3935 assert(OpNo == 1 &&
"Can only split the stored value");
3938 bool isTruncating =
N->isTruncatingStore();
3941 EVT MemoryVT =
N->getMemoryVT();
3942 Align Alignment =
N->getOriginalAlign();
3946 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3948 EVT LoMemVT, HiMemVT;
3949 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3956 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3957 Alignment, MMOFlags, AAInfo);
3959 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3963 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3966 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3967 HiMemVT, Alignment, MMOFlags, AAInfo);
3969 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3983 EVT EltVT =
N->getValueType(0).getVectorElementType();
3985 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3988 DAG.getVectorIdxConstant(i,
DL)));
3992 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
4013 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
4014 SDValue InVec =
N->getOperand(OpNo);
4016 EVT OutVT =
N->getValueType(0);
4024 EVT LoOutVT, HiOutVT;
4025 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
4026 assert(LoOutVT == HiOutVT &&
"Unequal split?");
4031 if (isTypeLegal(LoOutVT) ||
4032 InElementSize <= OutElementSize * 2)
4033 return SplitVecOp_UnaryOp(
N);
4042 return SplitVecOp_UnaryOp(
N);
4046 GetSplitVector(InVec, InLoVec, InHiVec);
4052 EVT HalfElementVT = IsFloat ?
4054 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
4061 if (
N->isStrictFPOpcode()) {
4062 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
4063 {N->getOperand(0), InLoVec});
4064 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
4065 {N->getOperand(0), InHiVec});
4071 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
4072 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
4084 if (
N->isStrictFPOpcode()) {
4088 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
4096 DAG.getTargetConstant(
4103 assert(
N->getValueType(0).isVector() &&
4104 N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
4105 "Operand types must be vectors");
4107 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
4109 GetSplitVector(
N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
4110 GetSplitVector(
N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
4123 DAG.getVTList(PartResVT,
N->getValueType(1)),
4124 N->getOperand(0), Lo0, Lo1,
N->getOperand(3));
4126 DAG.getVTList(PartResVT,
N->getValueType(1)),
4127 N->getOperand(0), Hi0, Hi1,
N->getOperand(3));
4130 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4132 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
4133 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4134 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
4135 std::tie(EVLLo, EVLHi) =
4136 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
4137 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
4138 N->getOperand(2), MaskLo, EVLLo);
4139 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
4140 N->getOperand(2), MaskHi, EVLHi);
4144 EVT OpVT =
N->getOperand(0).getValueType();
4147 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
4153 EVT ResVT =
N->getValueType(0);
4156 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
4157 EVT InVT =
Lo.getValueType();
4162 if (
N->isStrictFPOpcode()) {
4163 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4164 { N->getOperand(0), Lo, N->getOperand(2) });
4165 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
4166 { N->getOperand(0), Hi, N->getOperand(2) });
4170 Lo.getValue(1),
Hi.getValue(1));
4171 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4172 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4173 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4174 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4175 std::tie(EVLLo, EVLHi) =
4176 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4177 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4178 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
4192SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4195 EVT LHSLoVT, LHSHiVT;
4196 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
4198 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4199 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
4202 std::tie(LHSLo, LHSHi) =
4203 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4206 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4208 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4209 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4218 SDValue LHSLo, LHSHi, RHSLo, RHSHi;
4219 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
4220 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
4222 EVT ResVT =
N->getValueType(0);
4227 SDValue Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
4228 SDValue Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
4234 EVT ResVT =
N->getValueType(0);
4237 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4238 EVT InVT =
Lo.getValueType();
4244 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4245 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4252 EVT ResVT =
N->getValueType(0);
4256 GetSplitVector(VecOp,
Lo,
Hi);
4258 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4259 auto [EVLLo, EVLHi] =
4261 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4267 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4269 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4270 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
4277void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4278 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
4281 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
4286 auto unrollExpandedOp = [&]() {
4291 EVT VT =
N->getValueType(0);
4301 switch (
N->getOpcode()) {
4304 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4312 Res = WidenVecRes_ADDRSPACECAST(
N);
4319 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4323 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4327 case ISD::EXPERIMENTAL_VP_SPLAT:
4328 Res = WidenVecRes_ScalarOp(
N);
4333 case ISD::VP_SELECT:
4335 Res = WidenVecRes_Select(
N);
4339 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4340 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4342 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4345 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4347 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4348 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4351 Res = WidenVecRes_VECTOR_COMPRESS(
N);
4354 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4357 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4359 case ISD::VP_GATHER:
4360 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4363 Res = WidenVecRes_VECTOR_REVERSE(
N);
4371 case ISD::OR:
case ISD::VP_OR:
4379 case ISD::VP_FMINNUM:
4382 case ISD::VP_FMAXNUM:
4384 case ISD::VP_FMINIMUM:
4386 case ISD::VP_FMAXIMUM:
4417 case ISD::VP_FCOPYSIGN:
4418 Res = WidenVecRes_Binary(
N);
4423 Res = WidenVecRes_CMP(
N);
4428 if (unrollExpandedOp())
4443 Res = WidenVecRes_BinaryCanTrap(
N);
4452 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
4455#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4456 case ISD::STRICT_##DAGN:
4457#include "llvm/IR/ConstrainedOps.def"
4458 Res = WidenVecRes_StrictFP(
N);
4467 Res = WidenVecRes_OverflowOp(
N, ResNo);
4471 Res = WidenVecRes_FCOPYSIGN(
N);
4476 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4481 if (!unrollExpandedOp())
4482 Res = WidenVecRes_ExpOp(
N);
4488 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
4493 case ISD::VP_FP_EXTEND:
4495 case ISD::VP_FP_ROUND:
4497 case ISD::VP_FP_TO_SINT:
4499 case ISD::VP_FP_TO_UINT:
4501 case ISD::VP_SIGN_EXTEND:
4503 case ISD::VP_SINT_TO_FP:
4504 case ISD::VP_TRUNCATE:
4507 case ISD::VP_UINT_TO_FP:
4509 case ISD::VP_ZERO_EXTEND:
4510 Res = WidenVecRes_Convert(
N);
4515 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4521 case ISD::VP_LLRINT:
4522 Res = WidenVecRes_XRINT(
N);
4549 if (unrollExpandedOp())
4559 case ISD::VP_BITREVERSE:
4565 case ISD::VP_CTLZ_ZERO_UNDEF:
4571 case ISD::VP_CTTZ_ZERO_UNDEF:
4576 case ISD::VP_FFLOOR:
4578 case ISD::VP_FNEARBYINT:
4579 case ISD::VP_FROUND:
4580 case ISD::VP_FROUNDEVEN:
4581 case ISD::VP_FROUNDTOZERO:
4585 Res = WidenVecRes_Unary(
N);
4592 Res = WidenVecRes_Ternary(
N);
4598 SetWidenedVector(
SDValue(
N, ResNo), Res);
4605 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4606 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4607 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4608 if (
N->getNumOperands() == 3)
4609 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4611 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4612 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4616 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4617 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4624 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4625 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4626 if (
N->getNumOperands() == 2)
4627 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4630 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4631 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4635 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4636 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
4645 EVT OpVT =
LHS.getValueType();
4647 LHS = GetWidenedVector(LHS);
4648 RHS = GetWidenedVector(RHS);
4649 OpVT =
LHS.getValueType();
4655 return DAG.getNode(
N->getOpcode(), dl, WidenResVT, LHS, RHS);
4661SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4665 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4666 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4668 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4677 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4680 if (ConcatEnd == 1) {
4681 VT = ConcatOps[0].getValueType();
4683 return ConcatOps[0];
4686 SDLoc dl(ConcatOps[0]);
4693 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4694 int Idx = ConcatEnd - 1;
4695 VT = ConcatOps[
Idx--].getValueType();
4696 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4709 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4710 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4714 ConcatOps[
Idx+1] = VecOp;
4715 ConcatEnd =
Idx + 2;
4721 unsigned RealVals = ConcatEnd -
Idx - 1;
4722 unsigned SubConcatEnd = 0;
4723 unsigned SubConcatIdx =
Idx + 1;
4724 while (SubConcatEnd < RealVals)
4725 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4726 while (SubConcatEnd < OpsToConcat)
4727 SubConcatOps[SubConcatEnd++] = undefVec;
4729 NextVT, SubConcatOps);
4730 ConcatEnd = SubConcatIdx + 1;
4735 if (ConcatEnd == 1) {
4736 VT = ConcatOps[0].getValueType();
4738 return ConcatOps[0];
4743 if (NumOps != ConcatEnd ) {
4745 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4746 ConcatOps[j] = UndefVal;
4754 unsigned Opcode =
N->getOpcode();
4762 NumElts = NumElts / 2;
4766 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4768 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4769 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4770 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4782 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4783 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4784 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4787 unsigned ConcatEnd = 0;
4795 while (CurNumElts != 0) {
4796 while (CurNumElts >= NumElts) {
4798 DAG.getVectorIdxConstant(
Idx, dl));
4800 DAG.getVectorIdxConstant(
Idx, dl));
4801 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4803 CurNumElts -= NumElts;
4806 NumElts = NumElts / 2;
4811 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4813 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4815 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4816 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4827 switch (
N->getOpcode()) {
4830 return WidenVecRes_STRICT_FSETCC(
N);
4837 return WidenVecRes_Convert_StrictFP(
N);
4843 unsigned NumOpers =
N->getNumOperands();
4844 unsigned Opcode =
N->getOpcode();
4851 NumElts = NumElts / 2;
4862 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4866 unsigned ConcatEnd = 0;
4873 for (
unsigned i = 1; i < NumOpers; ++i) {
4879 Oper = GetWidenedVector(Oper);
4885 DAG.getUNDEF(WideOpVT), Oper,
4886 DAG.getVectorIdxConstant(0, dl));
4898 while (CurNumElts != 0) {
4899 while (CurNumElts >= NumElts) {
4902 for (
unsigned i = 0; i < NumOpers; ++i) {
4905 EVT OpVT =
Op.getValueType();
4911 DAG.getVectorIdxConstant(
Idx, dl));
4917 EVT OperVT[] = {VT, MVT::Other};
4919 ConcatOps[ConcatEnd++] = Oper;
4922 CurNumElts -= NumElts;
4925 NumElts = NumElts / 2;
4930 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4933 for (
unsigned i = 0; i < NumOpers; ++i) {
4936 EVT OpVT =
Op.getValueType();
4940 DAG.getVectorIdxConstant(
Idx, dl));
4945 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4947 ConcatOps[ConcatEnd++] = Oper;
4956 if (Chains.
size() == 1)
4957 NewChain = Chains[0];
4960 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4965SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4967 EVT ResVT =
N->getValueType(0);
4968 EVT OvVT =
N->getValueType(1);
4969 EVT WideResVT, WideOvVT;
4979 WideLHS = GetWidenedVector(
N->getOperand(0));
4980 WideRHS = GetWidenedVector(
N->getOperand(1));
4990 N->getOperand(0), Zero);
4993 N->getOperand(1), Zero);
4996 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4997 SDNode *WideNode = DAG.getNode(
4998 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
5001 unsigned OtherNo = 1 - ResNo;
5002 EVT OtherVT =
N->getValueType(OtherNo);
5009 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
5012 return SDValue(WideNode, ResNo);
5025 unsigned Opcode =
N->getOpcode();
5034 InOp = ZExtPromotedInteger(InOp);
5045 InOp = GetWidenedVector(
N->getOperand(0));
5048 if (InVTEC == WidenEC) {
5049 if (
N->getNumOperands() == 1)
5050 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5051 if (
N->getNumOperands() == 3) {
5052 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5055 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
5057 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
5080 unsigned NumConcat =
5085 if (
N->getNumOperands() == 1)
5086 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
5087 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
5092 DAG.getVectorIdxConstant(0,
DL));
5094 if (
N->getNumOperands() == 1)
5095 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
5096 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
5105 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5106 for (
unsigned i=0; i < MinElts; ++i) {
5108 DAG.getVectorIdxConstant(i,
DL));
5109 if (
N->getNumOperands() == 1)
5110 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
5112 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
5115 return DAG.getBuildVector(WidenVT,
DL, Ops);
5124 EVT SrcVT = Src.getValueType();
5128 Src = GetWidenedVector(Src);
5129 SrcVT = Src.getValueType();
5136 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
5145 EVT SrcVT = Src.getValueType();
5149 Src = GetWidenedVector(Src);
5150 SrcVT = Src.getValueType();
5157 if (
N->getNumOperands() == 1)
5158 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
5160 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5161 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5165 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
5168SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
5179 unsigned Opcode =
N->getOpcode();
5185 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
5190 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
5191 for (
unsigned i=0; i < MinElts; ++i) {
5193 DAG.getVectorIdxConstant(i,
DL));
5194 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
5198 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5200 return DAG.getBuildVector(WidenVT,
DL, Ops);
5203SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
5204 unsigned Opcode =
N->getOpcode();
5217 InOp = GetWidenedVector(InOp);
5224 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
5231 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5233 DAG.getVectorIdxConstant(i,
DL));
5250 while (Ops.
size() != WidenNumElts)
5253 return DAG.getBuildVector(WidenVT,
DL, Ops);
5259 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5260 return WidenVecRes_BinaryCanTrap(
N);
5270SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5271 SDValue FpValue =
N->getOperand(0);
5275 SDValue Arg = GetWidenedVector(FpValue);
5276 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5282 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5284 EVT ExpVT =
RHS.getValueType();
5289 ExpOp = ModifyToType(RHS, WideExpVT);
5292 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5298 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5299 if (
N->getNumOperands() == 1)
5300 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5302 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5303 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5307 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5308 {InOp,
Mask,
N->getOperand(2)});
5314 cast<VTSDNode>(
N->getOperand(1))->getVT()
5315 .getVectorElementType(),
5317 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5318 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5319 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5322SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5323 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5324 return GetWidenedVector(WidenVec);
5329 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5330 auto *AddrSpaceCastN = cast<AddrSpaceCastSDNode>(
N);
5332 return DAG.getAddrSpaceCast(
SDLoc(
N), WidenVT, InOp,
5333 AddrSpaceCastN->getSrcAddressSpace(),
5334 AddrSpaceCastN->getDestAddressSpace());
5340 EVT VT =
N->getValueType(0);
5344 switch (getTypeAction(InVT)) {
5358 SDValue NInOp = GetPromotedInteger(InOp);
5360 if (WidenVT.
bitsEq(NInVT)) {
5363 if (DAG.getDataLayout().isBigEndian()) {
5368 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5387 InOp = GetWidenedVector(InOp);
5389 if (WidenVT.
bitsEq(InVT))
5399 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5404 unsigned NewNumParts = WidenSize / InSize;
5417 EVT OrigInVT =
N->getOperand(0).getValueType();
5430 if (WidenSize % InSize == 0) {
5437 DAG.ExtractVectorElements(InOp, Ops);
5438 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5450 return CreateStackStoreLoad(InOp, WidenVT);
5456 EVT VT =
N->getValueType(0);
5460 EVT EltVT =
N->getOperand(0).getValueType();
5467 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5468 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5470 return DAG.getBuildVector(WidenVT, dl, NewOps);
5474 EVT InVT =
N->getOperand(0).getValueType();
5477 unsigned NumOperands =
N->getNumOperands();
5479 bool InputWidened =
false;
5483 if (WidenNumElts % NumInElts == 0) {
5485 unsigned NumConcat = WidenNumElts / NumInElts;
5486 SDValue UndefVal = DAG.getUNDEF(InVT);
5488 for (
unsigned i=0; i < NumOperands; ++i)
5489 Ops[i] =
N->getOperand(i);
5490 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5495 InputWidened =
true;
5499 for (i=1; i < NumOperands; ++i)
5500 if (!
N->getOperand(i).isUndef())
5503 if (i == NumOperands)
5506 return GetWidenedVector(
N->getOperand(0));
5508 if (NumOperands == 2) {
5510 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5516 for (
unsigned i = 0; i < NumInElts; ++i) {
5518 MaskOps[i + NumInElts] = i + WidenNumElts;
5520 return DAG.getVectorShuffle(WidenVT, dl,
5521 GetWidenedVector(
N->getOperand(0)),
5522 GetWidenedVector(
N->getOperand(1)),
5529 "Cannot use build vectors to widen CONCAT_VECTOR result");
5537 for (
unsigned i=0; i < NumOperands; ++i) {
5540 InOp = GetWidenedVector(InOp);
5541 for (
unsigned j = 0;
j < NumInElts; ++
j)
5543 DAG.getVectorIdxConstant(j, dl));
5545 SDValue UndefVal = DAG.getUNDEF(EltVT);
5546 for (;
Idx < WidenNumElts; ++
Idx)
5547 Ops[
Idx] = UndefVal;
5548 return DAG.getBuildVector(WidenVT, dl, Ops);
5551SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5552 EVT VT =
N->getValueType(0);
5554 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5561SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5562 EVT VT =
N->getValueType(0);
5569 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5571 InOp = GetWidenedVector(InOp);
5577 if (IdxVal == 0 && InVT == WidenVT)
5584 assert(IdxVal % VTNumElts == 0 &&
5585 "Expected Idx to be a multiple of subvector minimum vector length");
5586 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5599 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5600 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5601 "down type's element count");
5608 for (;
I < VTNumElts / GCD; ++
I)
5611 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5612 for (;
I < WidenNumElts / GCD; ++
I)
5619 "EXTRACT_SUBVECTOR for scalable vectors");
5626 for (i = 0; i < VTNumElts; ++i)
5628 DAG.getVectorIdxConstant(IdxVal + i, dl));
5630 SDValue UndefVal = DAG.getUNDEF(EltVT);
5631 for (; i < WidenNumElts; ++i)
5633 return DAG.getBuildVector(WidenVT, dl, Ops);
5644SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5645 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5648 N->getOperand(1),
N->getOperand(2));
5661 if (!
LD->getMemoryVT().isByteSized()) {
5665 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5674 EVT LdVT =
LD->getMemoryVT();
5685 const auto *MMO =
LD->getMemOperand();
5687 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5701 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5703 Result = GenWidenVectorLoads(LdChain, LD);
5710 if (LdChain.
size() == 1)
5711 NewChain = LdChain[0];
5717 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5728 SDValue EVL =
N->getVectorLength();
5735 "Unable to widen binary VP op");
5736 Mask = GetWidenedVector(Mask);
5737 assert(
Mask.getValueType().getVectorElementCount() ==
5740 "Unable to widen vector load");
5743 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5744 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5745 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5759 "Unable to widen VP strided load");
5760 Mask = GetWidenedVector(Mask);
5763 assert(
Mask.getValueType().getVectorElementCount() ==
5765 "Data and mask vectors should have the same number of elements");
5767 SDValue Res = DAG.getStridedLoadVP(
5768 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5769 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5770 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5771 N->isExpandingLoad());
5779SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_COMPRESS(
SDNode *
N) {
5782 SDValue Passthru =
N->getOperand(2);
5786 Mask.getValueType().getVectorElementType(),
5789 SDValue WideVec = ModifyToType(Vec, WideVecVT);
5790 SDValue WideMask = ModifyToType(Mask, WideMaskVT,
true);
5791 SDValue WidePassthru = ModifyToType(Passthru, WideVecVT);
5793 WideMask, WidePassthru);
5800 EVT MaskVT =
Mask.getValueType();
5801 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5809 Mask = ModifyToType(Mask, WideMaskVT,
true);
5811 SDValue Res = DAG.getMaskedLoad(
5812 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5813 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5814 ExtType,
N->isExpandingLoad());
5825 EVT MaskVT =
Mask.getValueType();
5826 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5835 Mask = ModifyToType(Mask, WideMaskVT,
true);
5840 Index.getValueType().getScalarType(),
5848 N->getMemoryVT().getScalarType(), NumElts);
5849 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5850 WideMemVT, dl, Ops,
N->getMemOperand(),
5851 N->getIndexType(),
N->getExtensionType());
5868 N->getMemoryVT().getScalarType(), WideEC);
5869 Mask = GetWidenedMask(Mask, WideEC);
5872 Mask,
N->getVectorLength()};
5873 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5874 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5884 if (
N->isVPOpcode())
5885 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0),
5886 N->getOperand(1),
N->getOperand(2));
5887 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5915 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5916 return N->getOperand(OpNo).getValueType();
5924 N =
N.getOperand(0);
5926 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5927 if (!
N->getOperand(i)->isUndef())
5929 N =
N.getOperand(0);
5933 N =
N.getOperand(0);
5935 N =
N.getOperand(0);
5962 { MaskVT, MVT::Other }, Ops);
5963 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5973 if (MaskScalarBits < ToMaskScalBits) {
5977 }
else if (MaskScalarBits > ToMaskScalBits) {
5983 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5985 "Mask should have the right element size by now.");
5988 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5990 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5995 EVT SubVT =
Mask->getValueType(0);
6001 assert((
Mask->getValueType(0) == ToMaskVT) &&
6002 "A mask of ToMaskVT should have been produced by now.");
6023 EVT CondVT =
Cond->getValueType(0);
6027 EVT VSelVT =
N->getValueType(0);
6039 EVT FinalVT = VSelVT;
6051 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
6069 EVT ToMaskVT = VSelVT;
6076 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6092 if (ScalarBits0 != ScalarBits1) {
6093 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
6094 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
6106 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
6107 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
6111 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
6124 unsigned Opcode =
N->getOpcode();
6126 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
6127 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6128 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6130 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
6136 Cond1 = GetWidenedVector(Cond1);
6144 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
6145 SDValue Res = ModifyToType(SplitSelect, WidenVT);
6150 Cond1 = ModifyToType(Cond1, CondWidenVT);
6153 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6154 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
6156 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
6157 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
6159 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
6163 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
6164 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
6167 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
6172 return DAG.getUNDEF(WidenVT);
6176 EVT VT =
N->getValueType(0);
6183 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
6184 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
6188 for (
unsigned i = 0; i != NumElts; ++i) {
6189 int Idx =
N->getMaskElt(i);
6190 if (
Idx < (
int)NumElts)
6195 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
6197 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
6201 EVT VT =
N->getValueType(0);
6206 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
6212 unsigned IdxVal = WidenNumElts - VTNumElts;
6225 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
6228 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
6229 "down type's element count");
6232 for (; i < VTNumElts / GCD; ++i)
6235 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
6236 for (; i < WidenNumElts / GCD; ++i)
6245 for (
unsigned i = 0; i != VTNumElts; ++i) {
6246 Mask.push_back(IdxVal + i);
6248 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
6251 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
6256 assert(
N->getValueType(0).isVector() &&
6257 N->getOperand(0).getValueType().isVector() &&
6258 "Operands must be vectors");
6272 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6273 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6280 InOp1 = GetWidenedVector(InOp1);
6281 InOp2 = GetWidenedVector(InOp2);
6283 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6284 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6291 "Input not widened to expected type!");
6293 if (
N->getOpcode() == ISD::VP_SETCC) {
6296 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6297 N->getOperand(2), Mask,
N->getOperand(4));
6304 assert(
N->getValueType(0).isVector() &&
6305 N->getOperand(1).getValueType().isVector() &&
6306 "Operands must be vectors");
6307 EVT VT =
N->getValueType(0);
6318 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6323 for (
unsigned i = 0; i != NumElts; ++i) {
6325 DAG.getVectorIdxConstant(i, dl));
6327 DAG.getVectorIdxConstant(i, dl));
6329 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6330 {Chain, LHSElem, RHSElem, CC});
6331 Chains[i] = Scalars[i].getValue(1);
6332 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6333 DAG.getBoolConstant(
true, dl, EltVT, VT),
6334 DAG.getBoolConstant(
false, dl, EltVT, VT));
6338 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6340 return DAG.getBuildVector(WidenVT, dl, Scalars);
6346bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6347 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6351 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6354 switch (
N->getOpcode()) {
6357 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6368 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6369 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6370 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6371 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6376 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6378 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6379 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6381 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6382 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6390 Res = WidenVecOp_UnrollVectorOp(
N);
6397 Res = WidenVecOp_EXTEND(
N);
6402 Res = WidenVecOp_CMP(
N);
6418 Res = WidenVecOp_Convert(
N);
6423 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6426 case ISD::EXPERIMENTAL_VP_SPLAT:
6427 Res = WidenVecOp_VP_SPLAT(
N, OpNo);
6445 Res = WidenVecOp_VECREDUCE(
N);
6449 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6451 case ISD::VP_REDUCE_FADD:
6452 case ISD::VP_REDUCE_SEQ_FADD:
6453 case ISD::VP_REDUCE_FMUL:
6454 case ISD::VP_REDUCE_SEQ_FMUL:
6455 case ISD::VP_REDUCE_ADD:
6456 case ISD::VP_REDUCE_MUL:
6457 case ISD::VP_REDUCE_AND:
6458 case ISD::VP_REDUCE_OR:
6459 case ISD::VP_REDUCE_XOR:
6460 case ISD::VP_REDUCE_SMAX:
6461 case ISD::VP_REDUCE_SMIN:
6462 case ISD::VP_REDUCE_UMAX:
6463 case ISD::VP_REDUCE_UMIN:
6464 case ISD::VP_REDUCE_FMAX:
6465 case ISD::VP_REDUCE_FMIN:
6466 case ISD::VP_REDUCE_FMAXIMUM:
6467 case ISD::VP_REDUCE_FMINIMUM:
6468 Res = WidenVecOp_VP_REDUCE(
N);
6470 case ISD::VP_CTTZ_ELTS:
6471 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
6472 Res = WidenVecOp_VP_CttzElements(
N);
6477 if (!Res.
getNode())
return false;
6485 if (
N->isStrictFPOpcode())
6487 "Invalid operand expansion");
6490 "Invalid operand expansion");
6492 ReplaceValueWith(
SDValue(
N, 0), Res);
6498 EVT VT =
N->getValueType(0);
6503 "Unexpected type action");
6504 InOp = GetWidenedVector(InOp);
6507 "Input wasn't widened!");
6518 FixedEltVT == InEltVT) {
6520 "Not enough elements in the fixed type for the operand!");
6522 "We can't have the same type as we started with!");
6525 DAG.getUNDEF(FixedVT), InOp,
6526 DAG.getVectorIdxConstant(0,
DL));
6529 DAG.getVectorIdxConstant(0,
DL));
6538 return WidenVecOp_Convert(
N);
6543 switch (
N->getOpcode()) {
6558 EVT OpVT =
N->getOperand(0).getValueType();
6559 EVT ResVT =
N->getValueType(0);
6567 DAG.getVectorIdxConstant(0, dl));
6569 DAG.getVectorIdxConstant(0, dl));
6575 LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
6576 RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
6578 return DAG.getNode(
N->getOpcode(), dl, ResVT, LHS, RHS);
6585 return DAG.UnrollVectorOp(
N);
6590 EVT ResultVT =
N->getValueType(0);
6592 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6601 {WideArg,
Test},
N->getFlags());
6608 DAG.getVectorIdxConstant(0,
DL));
6610 EVT OpVT =
N->getOperand(0).getValueType();
6613 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6618 EVT VT =
N->getValueType(0);
6621 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6624 "Unexpected type action");
6625 InOp = GetWidenedVector(InOp);
6627 unsigned Opcode =
N->getOpcode();
6633 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6635 if (
N->isStrictFPOpcode()) {
6637 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6638 {
N->getOperand(0), InOp,
N->getOperand(2) });
6640 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6641 {
N->getOperand(0), InOp });
6647 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6649 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6652 DAG.getVectorIdxConstant(0, dl));
6660 if (
N->isStrictFPOpcode()) {
6663 for (
unsigned i=0; i < NumElts; ++i) {
6665 DAG.getVectorIdxConstant(i, dl));
6666 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6670 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6672 for (
unsigned i = 0; i < NumElts; ++i)
6673 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6675 InOp, DAG.getVectorIdxConstant(i, dl)));
6678 return DAG.getBuildVector(VT, dl, Ops);
6682 EVT DstVT =
N->getValueType(0);
6683 SDValue Src = GetWidenedVector(
N->getOperand(0));
6684 EVT SrcVT = Src.getValueType();
6693 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6696 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6700 return DAG.UnrollVectorOp(
N);
6704 EVT VT =
N->getValueType(0);
6705 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6713 if (!VT.
isVector() && VT != MVT::x86mmx &&
6720 DAG.getVectorIdxConstant(0, dl));
6734 .divideCoefficientBy(EltSize);
6739 DAG.getVectorIdxConstant(0, dl));
6744 return CreateStackStoreLoad(InOp, VT);
6748 EVT VT =
N->getValueType(0);
6750 EVT InVT =
N->getOperand(0).getValueType();
6755 unsigned NumOperands =
N->getNumOperands();
6758 for (i = 1; i < NumOperands; ++i)
6759 if (!
N->getOperand(i).isUndef())
6762 if (i == NumOperands)
6763 return GetWidenedVector(
N->getOperand(0));
6773 for (
unsigned i=0; i < NumOperands; ++i) {
6777 "Unexpected type action");
6778 InOp = GetWidenedVector(InOp);
6779 for (
unsigned j = 0;
j < NumInElts; ++
j)
6781 DAG.getVectorIdxConstant(j, dl));
6783 return DAG.getBuildVector(VT, dl, Ops);
6786SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6787 EVT VT =
N->getValueType(0);
6792 SubVec = GetWidenedVector(SubVec);
6798 bool IndicesValid =
false;
6801 IndicesValid =
true;
6805 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6806 Attribute::VScaleRange);
6811 IndicesValid =
true;
6817 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6822 "INSERT_SUBVECTOR");
6825SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6826 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6828 N->getValueType(0), InOp,
N->getOperand(1));
6831SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6832 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6834 N->getValueType(0), InOp,
N->getOperand(1));
6837SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6838 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6839 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6847 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6850 if (
ST->isTruncatingStore())
6869 StVal = GetWidenedVector(StVal);
6873 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6874 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6875 EVL, StVT,
ST->getMemOperand(),
6876 ST->getAddressingMode());
6880 if (GenWidenVectorStores(StChain, ST)) {
6881 if (StChain.
size() == 1)
6890SDValue DAGTypeLegalizer::WidenVecOp_VP_SPLAT(
SDNode *
N,
unsigned OpNo) {
6891 assert(OpNo == 1 &&
"Can widen only mask operand of vp_splat");
6892 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0),
6893 N->getOperand(0), GetWidenedVector(
N->getOperand(1)),
6897SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6898 assert((OpNo == 1 || OpNo == 3) &&
6899 "Can widen only data or mask operand of vp_store");
6907 StVal = GetWidenedVector(StVal);
6913 "Unable to widen VP store");
6914 Mask = GetWidenedVector(Mask);
6916 Mask = GetWidenedVector(Mask);
6922 "Unable to widen VP store");
6923 StVal = GetWidenedVector(StVal);
6926 assert(
Mask.getValueType().getVectorElementCount() ==
6928 "Mask and data vectors should have the same number of elements");
6929 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6930 ST->getOffset(), Mask,
ST->getVectorLength(),
6931 ST->getMemoryVT(),
ST->getMemOperand(),
6932 ST->getAddressingMode(),
ST->isTruncatingStore(),
6933 ST->isCompressingStore());
6938 assert((OpNo == 1 || OpNo == 4) &&
6939 "Can widen only data or mask operand of vp_strided_store");
6948 "Unable to widen VP strided store");
6952 "Unable to widen VP strided store");
6954 StVal = GetWidenedVector(StVal);
6955 Mask = GetWidenedVector(Mask);
6958 Mask.getValueType().getVectorElementCount() &&
6959 "Data and mask vectors should have the same number of elements");
6961 return DAG.getStridedStoreVP(
6968SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6969 assert((OpNo == 1 || OpNo == 4) &&
6970 "Can widen only data or mask operand of mstore");
6973 EVT MaskVT =
Mask.getValueType();
6979 StVal = GetWidenedVector(StVal);
6986 Mask = ModifyToType(Mask, WideMaskVT,
true);
6990 Mask = ModifyToType(Mask, WideMaskVT,
true);
6996 StVal = ModifyToType(StVal, WideVT);
6999 assert(
Mask.getValueType().getVectorNumElements() ==
7001 "Mask and data vectors should have the same number of elements");
7008SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
7009 assert(OpNo == 4 &&
"Can widen only the index of mgather");
7010 auto *MG = cast<MaskedGatherSDNode>(
N);
7011 SDValue DataOp = MG->getPassThru();
7013 SDValue Scale = MG->getScale();
7021 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
7022 MG->getMemOperand(), MG->getIndexType(),
7023 MG->getExtensionType());
7029SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
7038 DataOp = GetWidenedVector(DataOp);
7042 EVT IndexVT =
Index.getValueType();
7048 EVT MaskVT =
Mask.getValueType();
7051 Mask = ModifyToType(Mask, WideMaskVT,
true);
7056 }
else if (OpNo == 4) {
7064 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
7069SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
7078 DataOp = GetWidenedVector(DataOp);
7081 Mask = GetWidenedMask(Mask, WideEC);
7084 }
else if (OpNo == 3) {
7093 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
7098 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
7099 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
7101 EVT VT =
N->getValueType(0);
7116 SVT, InOp0, InOp1,
N->getOperand(2));
7123 DAG.getVectorIdxConstant(0, dl));
7125 EVT OpVT =
N->getOperand(0).getValueType();
7128 return DAG.getNode(ExtendCode, dl, VT,
CC);
7138 EVT VT =
N->getValueType(0);
7140 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
7147 for (
unsigned i = 0; i != NumElts; ++i) {
7149 DAG.getVectorIdxConstant(i, dl));
7151 DAG.getVectorIdxConstant(i, dl));
7153 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
7154 {Chain, LHSElem, RHSElem, CC});
7155 Chains[i] = Scalars[i].getValue(1);
7156 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
7157 DAG.getBoolConstant(
true, dl, EltVT, VT),
7158 DAG.getBoolConstant(
false, dl, EltVT, VT));
7162 ReplaceValueWith(
SDValue(
N, 1), NewChain);
7164 return DAG.getBuildVector(VT, dl, Scalars);
7169 SDValue Op = GetWidenedVector(
N->getOperand(0));
7170 EVT OrigVT =
N->getOperand(0).getValueType();
7171 EVT WideVT =
Op.getValueType();
7175 unsigned Opc =
N->getOpcode();
7177 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7178 assert(NeutralElem &&
"Neutral element must exist");
7185 unsigned GCD = std::gcd(OrigElts, WideElts);
7188 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7189 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7191 DAG.getVectorIdxConstant(
Idx, dl));
7192 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
7195 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7197 DAG.getVectorIdxConstant(
Idx, dl));
7199 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
7209 EVT WideVT =
Op.getValueType();
7213 unsigned Opc =
N->getOpcode();
7215 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
7222 unsigned GCD = std::gcd(OrigElts, WideElts);
7225 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
7226 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
7228 DAG.getVectorIdxConstant(
Idx, dl));
7229 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
7232 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
7234 DAG.getVectorIdxConstant(
Idx, dl));
7236 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
7240 assert(
N->isVPOpcode() &&
"Expected VP opcode");
7243 SDValue Op = GetWidenedVector(
N->getOperand(1));
7245 Op.getValueType().getVectorElementCount());
7247 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
7248 {N->getOperand(0), Op, Mask, N->getOperand(3)},
7256 EVT VT =
N->getValueType(0);
7267 DAG.getVectorIdxConstant(0,
DL));
7277 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
7278 {Source, Mask, N->getOperand(2)},
N->getFlags());
7295 unsigned WidenEx = 0) {
7300 unsigned AlignInBits =
Align*8;
7303 EVT RetVT = WidenEltVT;
7304 if (!Scalable && Width == WidenEltWidth)
7318 (WidenWidth % MemVTWidth) == 0 &&
7320 (MemVTWidth <= Width ||
7321 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7322 if (MemVTWidth == WidenWidth)
7341 (WidenWidth % MemVTWidth) == 0 &&
7343 (MemVTWidth <= Width ||
7344 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7353 return std::nullopt;
7364 unsigned Start,
unsigned End) {
7365 SDLoc dl(LdOps[Start]);
7366 EVT LdTy = LdOps[Start].getValueType();
7374 for (
unsigned i = Start + 1; i !=
End; ++i) {
7375 EVT NewLdTy = LdOps[i].getValueType();
7376 if (NewLdTy != LdTy) {
7397 EVT LdVT =
LD->getMemoryVT();
7411 TypeSize WidthDiff = WidenWidth - LdWidth;
7418 std::optional<EVT> FirstVT =
7419 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7426 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7431 std::optional<EVT> NewVT = FirstVT;
7433 TypeSize NewVTWidth = FirstVTWidth;
7435 RemainingWidth -= NewVTWidth;
7442 NewVTWidth = NewVT->getSizeInBits();
7448 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7449 LD->getOriginalAlign(), MMOFlags, AAInfo);
7453 if (MemVTs.
empty()) {
7455 if (!FirstVT->isVector()) {
7462 if (FirstVT == WidenVT)
7467 unsigned NumConcat =
7470 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7471 ConcatOps[0] = LdOp;
7472 for (
unsigned i = 1; i != NumConcat; ++i)
7473 ConcatOps[i] = UndefVal;
7485 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7488 for (
EVT MemVT : MemVTs) {
7489 Align NewAlign = ScaledOffset == 0
7490 ?
LD->getOriginalAlign()
7493 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7497 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7502 if (!LdOps[0].getValueType().
isVector())
7512 EVT LdTy = LdOps[i].getValueType();
7515 for (--i; i >= 0; --i) {
7516 LdTy = LdOps[i].getValueType();
7523 ConcatOps[--
Idx] = LdOps[i];
7524 for (--i; i >= 0; --i) {
7525 EVT NewLdTy = LdOps[i].getValueType();
7526 if (NewLdTy != LdTy) {
7537 WidenOps[j] = ConcatOps[
Idx+j];
7538 for (;
j != NumOps; ++
j)
7539 WidenOps[j] = DAG.getUNDEF(LdTy);
7546 ConcatOps[--
Idx] = LdOps[i];
7557 SDValue UndefVal = DAG.getUNDEF(LdTy);
7560 for (; i !=
End-
Idx; ++i)
7561 WidenOps[i] = ConcatOps[
Idx+i];
7562 for (; i != NumOps; ++i)
7563 WidenOps[i] = UndefVal;
7575 EVT LdVT =
LD->getMemoryVT();
7588 "not yet supported");
7599 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7600 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7606 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7607 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7608 LD->getOriginalAlign(), MMOFlags, AAInfo);
7613 SDValue UndefVal = DAG.getUNDEF(EltVT);
7614 for (; i != WidenNumElts; ++i)
7617 return DAG.getBuildVector(WidenVT, dl, Ops);
7629 SDValue ValOp = GetWidenedVector(
ST->getValue());
7632 EVT StVT =
ST->getMemoryVT();
7640 "Mismatch between store and value types");
7654 std::optional<EVT> NewVT =
7659 TypeSize NewVTWidth = NewVT->getSizeInBits();
7662 StWidth -= NewVTWidth;
7663 MemVTs.
back().second++;
7667 for (
const auto &Pair : MemVTs) {
7668 EVT NewVT = Pair.first;
7669 unsigned Count = Pair.second;
7675 Align NewAlign = ScaledOffset == 0
7676 ?
ST->getOriginalAlign()
7679 DAG.getVectorIdxConstant(
Idx, dl));
7680 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7685 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7697 DAG.getVectorIdxConstant(
Idx++, dl));
7699 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7703 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7717 bool FillWithZeroes) {
7722 "input and widen element type must match");
7724 "cannot modify scalable vectors in this way");
7736 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7739 for (
unsigned i = 1; i != NumConcat; ++i)
7747 DAG.getVectorIdxConstant(0, dl));
7750 "Scalable vectors should have been handled already.");
7758 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7762 DAG.getVectorIdxConstant(
Idx, dl));
7764 SDValue UndefVal = DAG.getUNDEF(EltVT);
7765 for (;
Idx < WidenNumElts; ++
Idx)
7766 Ops[
Idx] = UndefVal;
7768 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7769 if (!FillWithZeroes)
7773 "We expect to never want to FillWithZeroes for non-integral types.");
7776 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7777 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7779 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7780 DAG.getBuildVector(NVT, dl,
MaskOps));
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
EVT changeElementType(EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), and returns the integer EVT with that number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.