34#define DEBUG_TYPE "legalize-types"
40void DAGTypeLegalizer::ScalarizeVectorResult(
SDNode *
N,
unsigned ResNo) {
45 switch (
N->getOpcode()) {
48 dbgs() <<
"ScalarizeVectorResult #" << ResNo <<
": ";
60 case ISD::FPOWI: R = ScalarizeVecRes_ExpOp(
N);
break;
62 case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(
N));
break;
68 case ISD::SETCC: R = ScalarizeVecRes_SETCC(
N);
break;
69 case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(
N);
break;
75 R = ScalarizeVecRes_VecInregOp(
N);
117 R = ScalarizeVecRes_UnaryOp(
N);
120 R = ScalarizeVecRes_FFREXP(
N, ResNo);
165 R = ScalarizeVecRes_BinOp(
N);
170 R = ScalarizeVecRes_TernaryOp(
N);
173#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
174 case ISD::STRICT_##DAGN:
175#include "llvm/IR/ConstrainedOps.def"
176 R = ScalarizeVecRes_StrictFPOp(
N);
181 R = ScalarizeVecRes_FP_TO_XINT_SAT(
N);
190 R = ScalarizeVecRes_OverflowOp(
N, ResNo);
200 R = ScalarizeVecRes_FIX(
N);
206 SetScalarizedVector(
SDValue(
N, ResNo), R);
210 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
211 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
213 LHS.getValueType(), LHS, RHS,
N->getFlags());
217 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
218 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
219 SDValue Op2 = GetScalarizedVector(
N->getOperand(2));
225 SDValue Op0 = GetScalarizedVector(
N->getOperand(0));
226 SDValue Op1 = GetScalarizedVector(
N->getOperand(1));
232SDValue DAGTypeLegalizer::ScalarizeVecRes_FFREXP(
SDNode *
N,
unsigned ResNo) {
233 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
234 "Unexpected vector type!");
235 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
237 EVT VT0 =
N->getValueType(0);
238 EVT VT1 =
N->getValueType(1);
243 {VT0.getScalarType(), VT1.getScalarType()}, Elt)
247 unsigned OtherNo = 1 - ResNo;
248 EVT OtherVT =
N->getValueType(OtherNo);
250 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
254 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
257 return SDValue(ScalarNode, ResNo);
261 EVT VT =
N->getValueType(0).getVectorElementType();
262 unsigned NumOpers =
N->getNumOperands();
264 EVT ValueVTs[] = {VT, MVT::Other};
273 for (
unsigned i = 1; i < NumOpers; ++i) {
279 Oper = GetScalarizedVector(Oper);
290 Opers,
N->getFlags());
301 EVT ResVT =
N->getValueType(0);
302 EVT OvVT =
N->getValueType(1);
306 ScalarLHS = GetScalarizedVector(
N->getOperand(0));
307 ScalarRHS = GetScalarizedVector(
N->getOperand(1));
312 ScalarLHS = ElemsLHS[0];
313 ScalarRHS = ElemsRHS[0];
319 N->getOpcode(),
DL, ScalarVTs, ScalarLHS, ScalarRHS).
getNode();
323 unsigned OtherNo = 1 - ResNo;
324 EVT OtherVT =
N->getValueType(OtherNo);
326 SetScalarizedVector(
SDValue(
N, OtherNo),
SDValue(ScalarNode, OtherNo));
330 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
333 return SDValue(ScalarNode, ResNo);
338 SDValue Op = DisintegrateMERGE_VALUES(
N, ResNo);
339 return GetScalarizedVector(
Op);
344 if (
Op.getValueType().isVector()
345 &&
Op.getValueType().getVectorNumElements() == 1
346 && !isSimpleLegalType(
Op.getValueType()))
347 Op = GetScalarizedVector(
Op);
348 EVT NewVT =
N->getValueType(0).getVectorElementType();
353SDValue DAGTypeLegalizer::ScalarizeVecRes_BUILD_VECTOR(
SDNode *
N) {
354 EVT EltVT =
N->getValueType(0).getVectorElementType();
363SDValue DAGTypeLegalizer::ScalarizeVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
365 N->getValueType(0).getVectorElementType(),
366 N->getOperand(0),
N->getOperand(1));
372 EVT OpVT =
Op.getValueType();
376 Op = GetScalarizedVector(
Op);
383 N->getValueType(0).getVectorElementType(),
Op,
388 SDValue Op = GetScalarizedVector(
N->getOperand(0));
393SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
397 EVT EltVT =
N->getValueType(0).getVectorElementType();
398 if (
Op.getValueType() != EltVT)
405 assert(
N->isUnindexed() &&
"Indexed vector load?");
409 N->getValueType(0).getVectorElementType(),
SDLoc(
N),
N->getChain(),
410 N->getBasePtr(), DAG.
getUNDEF(
N->getBasePtr().getValueType()),
411 N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
412 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
N->getAAInfo());
422 EVT DestVT =
N->getValueType(0).getVectorElementType();
424 EVT OpVT =
Op.getValueType();
434 Op = GetScalarizedVector(
Op);
444 EVT EltVT =
N->getValueType(0).getVectorElementType();
446 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
455 EVT OpVT =
Op.getValueType();
457 EVT EltVT =
N->getValueType(0).getVectorElementType();
460 Op = GetScalarizedVector(
Op);
466 switch (
N->getOpcode()) {
478SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(
SDNode *
N) {
481 EVT EltVT =
N->getValueType(0).getVectorElementType();
490 EVT OpVT =
Cond.getValueType();
503 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
517 EVT OpVT =
Cond->getOperand(0).getValueType();
524 EVT CondVT =
Cond.getValueType();
525 if (ScalarBool != VecBool) {
526 switch (ScalarBool) {
547 auto BoolVT = getSetCCResultType(CondVT);
548 if (BoolVT.bitsLT(CondVT))
553 GetScalarizedVector(
N->getOperand(2)));
557 SDValue LHS = GetScalarizedVector(
N->getOperand(1));
559 LHS.getValueType(),
N->getOperand(0), LHS,
560 GetScalarizedVector(
N->getOperand(2)));
564 SDValue LHS = GetScalarizedVector(
N->getOperand(2));
566 N->getOperand(0),
N->getOperand(1),
567 LHS, GetScalarizedVector(
N->getOperand(3)),
572 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
575SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(
SDNode *
N) {
577 SDValue Arg =
N->getOperand(2).getOperand(0);
579 return DAG.
getUNDEF(
N->getValueType(0).getVectorElementType());
580 unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
581 return GetScalarizedVector(
N->getOperand(
Op));
584SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(
SDNode *
N) {
586 EVT SrcVT = Src.getValueType();
591 Src = GetScalarizedVector(Src);
597 EVT DstVT =
N->getValueType(0).getVectorElementType();
598 return DAG.
getNode(
N->getOpcode(), dl, DstVT, Src,
N->getOperand(1));
602 assert(
N->getValueType(0).isVector() &&
603 N->getOperand(0).getValueType().isVector() &&
604 "Operand types must be vectors");
607 EVT OpVT =
LHS.getValueType();
608 EVT NVT =
N->getValueType(0).getVectorElementType();
613 LHS = GetScalarizedVector(LHS);
614 RHS = GetScalarizedVector(RHS);
630 return DAG.
getNode(ExtendCode,
DL, NVT, Res);
638 EVT ResultVT =
N->getValueType(0).getVectorElementType();
641 Arg = GetScalarizedVector(Arg);
654 return DAG.
getNode(ExtendCode,
DL, ResultVT, Res);
661bool DAGTypeLegalizer::ScalarizeVectorOperand(
SDNode *
N,
unsigned OpNo) {
666 switch (
N->getOpcode()) {
669 dbgs() <<
"ScalarizeVectorOperand Op #" << OpNo <<
": ";
676 Res = ScalarizeVecOp_BITCAST(
N);
688 Res = ScalarizeVecOp_UnaryOp(
N);
694 Res = ScalarizeVecOp_UnaryOp_StrictFP(
N);
697 Res = ScalarizeVecOp_CONCAT_VECTORS(
N);
700 Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(
N);
703 Res = ScalarizeVecOp_VSELECT(
N);
706 Res = ScalarizeVecOp_VSETCC(
N);
709 Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
712 Res = ScalarizeVecOp_STRICT_FP_ROUND(
N, OpNo);
715 Res = ScalarizeVecOp_FP_ROUND(
N, OpNo);
718 Res = ScalarizeVecOp_STRICT_FP_EXTEND(
N);
721 Res = ScalarizeVecOp_FP_EXTEND(
N);
738 Res = ScalarizeVecOp_VECREDUCE(
N);
742 Res = ScalarizeVecOp_VECREDUCE_SEQ(
N);
747 if (!Res.
getNode())
return false;
755 "Invalid operand expansion");
757 ReplaceValueWith(
SDValue(
N, 0), Res);
764 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
766 N->getValueType(0), Elt);
772 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
773 "Unexpected vector type!");
774 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
776 N->getValueType(0).getScalarType(), Elt);
784SDValue DAGTypeLegalizer::ScalarizeVecOp_UnaryOp_StrictFP(
SDNode *
N) {
785 assert(
N->getValueType(0).getVectorNumElements() == 1 &&
786 "Unexpected vector type!");
787 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
789 {
N->getValueType(0).getScalarType(), MVT::Other },
790 {
N->getOperand(0), Elt });
800 ReplaceValueWith(
SDValue(
N, 0), Res);
805SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(
SDNode *
N) {
807 for (
unsigned i = 0, e =
N->getNumOperands(); i < e; ++i)
808 Ops[i] = GetScalarizedVector(
N->getOperand(i));
814SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
815 EVT VT =
N->getValueType(0);
816 SDValue Res = GetScalarizedVector(
N->getOperand(0));
828 SDValue ScalarCond = GetScalarizedVector(
N->getOperand(0));
829 EVT VT =
N->getValueType(0);
839 assert(
N->getValueType(0).isVector() &&
840 N->getOperand(0).getValueType().isVector() &&
841 "Operand types must be vectors");
842 assert(
N->getValueType(0) == MVT::v1i1 &&
"Expected v1i1 type");
844 EVT VT =
N->getValueType(0);
845 SDValue LHS = GetScalarizedVector(
N->getOperand(0));
846 SDValue RHS = GetScalarizedVector(
N->getOperand(1));
848 EVT OpVT =
N->getOperand(0).getValueType();
860 Res = DAG.
getNode(ExtendCode,
DL, NVT, Res);
868 assert(
N->isUnindexed() &&
"Indexed store of one-element vector?");
869 assert(OpNo == 1 &&
"Do not know how to scalarize this operand!");
872 if (
N->isTruncatingStore())
874 N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
875 N->getBasePtr(),
N->getPointerInfo(),
876 N->getMemoryVT().getVectorElementType(),
N->getOriginalAlign(),
877 N->getMemOperand()->getFlags(),
N->getAAInfo());
879 return DAG.
getStore(
N->getChain(), dl, GetScalarizedVector(
N->getOperand(1)),
880 N->getBasePtr(),
N->getPointerInfo(),
881 N->getOriginalAlign(),
N->getMemOperand()->getFlags(),
887SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(
SDNode *
N,
unsigned OpNo) {
888 assert(OpNo == 0 &&
"Wrong operand for scalarization!");
889 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
891 N->getValueType(0).getVectorElementType(), Elt,
896SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(
SDNode *
N,
898 assert(OpNo == 1 &&
"Wrong operand for scalarization!");
899 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
903 {
N->getOperand(0), Elt,
N->getOperand(2) });
912 ReplaceValueWith(
SDValue(
N, 0), Res);
919 SDValue Elt = GetScalarizedVector(
N->getOperand(0));
921 N->getValueType(0).getVectorElementType(), Elt);
927SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(
SDNode *
N) {
928 SDValue Elt = GetScalarizedVector(
N->getOperand(1));
932 {
N->getOperand(0), Elt});
941 ReplaceValueWith(
SDValue(
N, 0), Res);
946 SDValue Res = GetScalarizedVector(
N->getOperand(0));
953SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(
SDNode *
N) {
961 AccOp,
Op,
N->getFlags());
972void DAGTypeLegalizer::SplitVectorResult(
SDNode *
N,
unsigned ResNo) {
977 if (CustomLowerNode(
N,
N->getValueType(ResNo),
true))
980 switch (
N->getOpcode()) {
983 dbgs() <<
"SplitVectorResult #" << ResNo <<
": ";
995 case ISD::VP_SELECT: SplitRes_Select(
N,
Lo,
Hi);
break;
1010 SplitVecRes_ScalarOp(
N,
Lo,
Hi);
1013 SplitVecRes_STEP_VECTOR(
N,
Lo,
Hi);
1017 SplitVecRes_LOAD(cast<LoadSDNode>(
N),
Lo,
Hi);
1020 SplitVecRes_VP_LOAD(cast<VPLoadSDNode>(
N),
Lo,
Hi);
1022 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
1023 SplitVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N),
Lo,
Hi);
1026 SplitVecRes_MLOAD(cast<MaskedLoadSDNode>(
N),
Lo,
Hi);
1029 case ISD::VP_GATHER:
1030 SplitVecRes_Gather(cast<MemSDNode>(
N),
Lo,
Hi,
true);
1034 SplitVecRes_SETCC(
N,
Lo,
Hi);
1037 SplitVecRes_VECTOR_REVERSE(
N,
Lo,
Hi);
1040 SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N),
Lo,
Hi);
1043 SplitVecRes_VECTOR_SPLICE(
N,
Lo,
Hi);
1046 SplitVecRes_VECTOR_DEINTERLEAVE(
N);
1049 SplitVecRes_VECTOR_INTERLEAVE(
N);
1052 SplitVecRes_VAARG(
N,
Lo,
Hi);
1058 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
1064 case ISD::VP_BITREVERSE:
1072 case ISD::VP_CTLZ_ZERO_UNDEF:
1074 case ISD::VP_CTTZ_ZERO_UNDEF:
1085 case ISD::VP_FFLOOR:
1090 case ISD::VP_FNEARBYINT:
1095 case ISD::VP_FP_EXTEND:
1097 case ISD::VP_FP_ROUND:
1099 case ISD::VP_FP_TO_SINT:
1101 case ISD::VP_FP_TO_UINT:
1107 case ISD::VP_LLRINT:
1109 case ISD::VP_FROUND:
1111 case ISD::VP_FROUNDEVEN:
1115 case ISD::VP_FROUNDTOZERO:
1117 case ISD::VP_SINT_TO_FP:
1119 case ISD::VP_TRUNCATE:
1121 case ISD::VP_UINT_TO_FP:
1123 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
1126 SplitVecRes_FFREXP(
N, ResNo,
Lo,
Hi);
1132 case ISD::VP_SIGN_EXTEND:
1133 case ISD::VP_ZERO_EXTEND:
1134 SplitVecRes_ExtendOp(
N,
Lo,
Hi);
1148 case ISD::VP_FMINIMUM:
1150 case ISD::VP_FMAXIMUM:
1156 case ISD::OR:
case ISD::VP_OR:
1176 case ISD::VP_FCOPYSIGN:
1177 SplitVecRes_BinOp(
N,
Lo,
Hi);
1184 SplitVecRes_TernaryOp(
N,
Lo,
Hi);
1187#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1188 case ISD::STRICT_##DAGN:
1189#include "llvm/IR/ConstrainedOps.def"
1190 SplitVecRes_StrictFPOp(
N,
Lo,
Hi);
1195 SplitVecRes_FP_TO_XINT_SAT(
N,
Lo,
Hi);
1204 SplitVecRes_OverflowOp(
N, ResNo,
Lo,
Hi);
1214 SplitVecRes_FIX(
N,
Lo,
Hi);
1216 case ISD::EXPERIMENTAL_VP_REVERSE:
1217 SplitVecRes_VP_REVERSE(
N,
Lo,
Hi);
1226void DAGTypeLegalizer::IncrementPointer(
MemSDNode *
N,
EVT MemVT,
1235 DL,
Ptr.getValueType(),
1236 APInt(
Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
1238 Flags.setNoUnsignedWrap(
true);
1240 *ScaledOffset += IncrementSize;
1244 MPI =
N->getPointerInfo().getWithOffset(IncrementSize);
1250std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask) {
1251 return SplitMask(Mask,
SDLoc(Mask));
1254std::pair<SDValue, SDValue> DAGTypeLegalizer::SplitMask(
SDValue Mask,
1257 EVT MaskVT =
Mask.getValueType();
1259 GetSplitVector(Mask, MaskLo, MaskHi);
1262 return std::make_pair(MaskLo, MaskHi);
1267 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1269 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1273 unsigned Opcode =
N->getOpcode();
1274 if (
N->getNumOperands() == 2) {
1280 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
1281 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1284 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
1287 std::tie(EVLLo, EVLHi) =
1288 DAG.
SplitEVL(
N->getOperand(3),
N->getValueType(0), dl);
1291 {LHSLo, RHSLo, MaskLo, EVLLo}, Flags);
1293 {LHSHi, RHSHi, MaskHi, EVLHi}, Flags);
1299 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
1301 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
1303 GetSplitVector(
N->getOperand(2), Op2Lo, Op2Hi);
1307 unsigned Opcode =
N->getOpcode();
1308 if (
N->getNumOperands() == 3) {
1314 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
1315 assert(
N->isVPOpcode() &&
"Expected VP opcode");
1318 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
1321 std::tie(EVLLo, EVLHi) =
1322 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0), dl);
1325 {Op0Lo, Op1Lo, Op2Lo, MaskLo, EVLLo}, Flags);
1327 {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
1332 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1334 GetSplitVector(
N->getOperand(1), RHSLo, RHSHi);
1338 unsigned Opcode =
N->getOpcode();
1357 switch (getTypeAction(InVT)) {
1372 GetExpandedOp(InOp,
Lo,
Hi);
1383 GetSplitVector(InOp,
Lo,
Hi);
1404 SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT,
Lo,
Hi);
1427 assert(!(
N->getNumOperands() & 1) &&
"Unsupported CONCAT_VECTORS");
1429 unsigned NumSubvectors =
N->getNumOperands() / 2;
1430 if (NumSubvectors == 1) {
1431 Lo =
N->getOperand(0);
1432 Hi =
N->getOperand(1);
1446void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(
SDNode *
N,
SDValue &
Lo,
1468 GetSplitVector(Vec,
Lo,
Hi);
1471 EVT LoVT =
Lo.getValueType();
1480 unsigned IdxVal =
Idx->getAsZExtVal();
1481 if (IdxVal + SubElems <= LoElems) {
1489 IdxVal >= LoElems && IdxVal + SubElems <= VecElems) {
1515 Lo = DAG.
getLoad(
Lo.getValueType(), dl, Store, StackPtr, PtrInfo,
1519 auto *
Load = cast<LoadSDNode>(
Lo);
1521 IncrementPointer(Load, LoVT, MPI, StackPtr);
1524 Hi = DAG.
getLoad(
Hi.getValueType(), dl, Store, StackPtr, MPI, SmallestAlign);
1533 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1538 EVT RHSVT =
RHS.getValueType();
1541 GetSplitVector(RHS, RHSLo, RHSHi);
1558 SDValue FpValue =
N->getOperand(0);
1560 GetSplitVector(FpValue, ArgLo, ArgHi);
1573 GetSplitVector(
N->getOperand(0), LHSLo, LHSHi);
1577 std::tie(LoVT, HiVT) =
1588 unsigned Opcode =
N->getOpcode();
1595 GetSplitVector(N0, InLo, InHi);
1602 EVT OutLoVT, OutHiVT;
1605 assert((2 * OutNumElements) <= InNumElements &&
1606 "Illegal extend vector in reg split");
1616 for (
unsigned i = 0; i != OutNumElements; ++i)
1617 SplitHi[i] = i + OutNumElements;
1620 Lo = DAG.
getNode(Opcode, dl, OutLoVT, InLo);
1621 Hi = DAG.
getNode(Opcode, dl, OutHiVT, InHi);
1626 unsigned NumOps =
N->getNumOperands();
1640 for (
unsigned i = 1; i < NumOps; ++i) {
1645 EVT InVT =
Op.getValueType();
1650 GetSplitVector(
Op, OpLo, OpHi);
1659 EVT LoValueVTs[] = {LoVT, MVT::Other};
1660 EVT HiValueVTs[] = {HiVT, MVT::Other};
1669 Lo.getValue(1),
Hi.getValue(1));
1673 ReplaceValueWith(
SDValue(
N, 1), Chain);
1676SDValue DAGTypeLegalizer::UnrollVectorOp_StrictFP(
SDNode *
N,
unsigned ResNE) {
1678 EVT VT =
N->getValueType(0);
1689 else if (NE > ResNE)
1693 EVT ChainVTs[] = {EltVT, MVT::Other};
1697 for (i = 0; i !=
NE; ++i) {
1699 for (
unsigned j = 1, e =
N->getNumOperands(); j != e; ++j) {
1700 SDValue Operand =
N->getOperand(j);
1711 Scalar.getNode()->setFlags(
N->getFlags());
1719 for (; i < ResNE; ++i)
1724 ReplaceValueWith(
SDValue(
N, 1), Chain);
1731void DAGTypeLegalizer::SplitVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo,
1734 EVT ResVT =
N->getValueType(0);
1735 EVT OvVT =
N->getValueType(1);
1736 EVT LoResVT, HiResVT, LoOvVT, HiOvVT;
1740 SDValue LoLHS, HiLHS, LoRHS, HiRHS;
1742 GetSplitVector(
N->getOperand(0), LoLHS, HiLHS);
1743 GetSplitVector(
N->getOperand(1), LoRHS, HiRHS);
1749 unsigned Opcode =
N->getOpcode();
1761 unsigned OtherNo = 1 - ResNo;
1762 EVT OtherVT =
N->getValueType(OtherNo);
1764 SetSplitVector(
SDValue(
N, OtherNo),
1770 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
1774void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(
SDNode *
N,
SDValue &
Lo,
1780 GetSplitVector(Vec,
Lo,
Hi);
1783 unsigned IdxVal = CIdx->getZExtValue();
1784 unsigned LoNumElts =
Lo.getValueType().getVectorMinNumElements();
1785 if (IdxVal < LoNumElts) {
1787 Lo.getValueType(),
Lo, Elt,
Idx);
1797 if (CustomLowerNode(
N,
N->getValueType(0),
true))
1838 Lo = DAG.
getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
1841 auto Load = cast<LoadSDNode>(
Lo);
1843 IncrementPointer(Load, LoVT, MPI, StackPtr);
1845 Hi = DAG.
getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
1849 if (LoVT !=
Lo.getValueType())
1851 if (HiVT !=
Hi.getValueType())
1859 assert(
N->getValueType(0).isScalableVector() &&
1860 "Only scalable vectors are supported for STEP_VECTOR");
1883 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT,
N->getOperand(0));
1903 EVT MemoryVT =
LD->getMemoryVT();
1907 EVT LoMemVT, HiMemVT;
1914 ReplaceValueWith(
SDValue(LD, 1), NewChain);
1919 LD->getPointerInfo(), LoMemVT,
LD->getOriginalAlign(),
1923 IncrementPointer(LD, LoMemVT, MPI,
Ptr);
1926 HiMemVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
1935 ReplaceValueWith(
SDValue(LD, 1), Ch);
1940 assert(
LD->isUnindexed() &&
"Indexed VP load during type legalization!");
1949 assert(
Offset.isUndef() &&
"Unexpected indexed variable-length load offset");
1950 Align Alignment =
LD->getOriginalAlign();
1953 EVT MemoryVT =
LD->getMemoryVT();
1955 EVT LoMemVT, HiMemVT;
1956 bool HiIsEmpty =
false;
1957 std::tie(LoMemVT, HiMemVT) =
1963 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
1966 GetSplitVector(Mask, MaskLo, MaskHi);
1968 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
1973 std::tie(EVLLo, EVLHi) = DAG.
SplitEVL(EVL,
LD->getValueType(0), dl);
1982 MaskLo, EVLLo, LoMemVT, MMO,
LD->isExpandingLoad());
1991 LD->isExpandingLoad());
1997 MPI =
LD->getPointerInfo().getWithOffset(
2002 Alignment,
LD->getAAInfo(),
LD->getRanges());
2005 Offset, MaskHi, EVLHi, HiMemVT, MMO,
2006 LD->isExpandingLoad());
2016 ReplaceValueWith(
SDValue(LD, 1), Ch);
2022 "Indexed VP strided load during type legalization!");
2024 "Unexpected indexed variable-length load offset");
2031 EVT LoMemVT, HiMemVT;
2032 bool HiIsEmpty =
false;
2033 std::tie(LoMemVT, HiMemVT) =
2039 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
2042 GetSplitVector(Mask, LoMask, HiMask);
2048 std::tie(LoEVL, HiEVL) =
2086 SLD->
getStride(), HiMask, HiEVL, HiMemVT, MMO,
2097 ReplaceValueWith(
SDValue(SLD, 1), Ch);
2110 assert(
Offset.isUndef() &&
"Unexpected indexed masked load offset");
2119 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
2122 GetSplitVector(Mask, MaskLo, MaskHi);
2124 std::tie(MaskLo, MaskHi) = DAG.
SplitVector(Mask, dl);
2128 EVT LoMemVT, HiMemVT;
2129 bool HiIsEmpty =
false;
2130 std::tie(LoMemVT, HiMemVT) =
2133 SDValue PassThruLo, PassThruHi;
2135 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2137 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2180 ReplaceValueWith(
SDValue(MLD, 1), Ch);
2197 if (
auto *MSC = dyn_cast<MaskedGatherSDNode>(
N)) {
2198 return {MSC->getMask(), MSC->getIndex(), MSC->getScale()};
2200 auto *VPSC = cast<VPGatherSDNode>(
N);
2201 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale()};
2204 EVT MemoryVT =
N->getMemoryVT();
2205 Align Alignment =
N->getOriginalAlign();
2209 if (SplitSETCC && Ops.Mask.getOpcode() ==
ISD::SETCC) {
2210 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
2212 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask, dl);
2215 EVT LoMemVT, HiMemVT;
2220 if (getTypeAction(Ops.Index.getValueType()) ==
2222 GetSplitVector(Ops.Index, IndexLo, IndexHi);
2224 std::tie(IndexLo, IndexHi) = DAG.
SplitVector(Ops.Index, dl);
2231 if (
auto *MGT = dyn_cast<MaskedGatherSDNode>(
N)) {
2232 SDValue PassThru = MGT->getPassThru();
2233 SDValue PassThruLo, PassThruHi;
2236 GetSplitVector(PassThru, PassThruLo, PassThruHi);
2238 std::tie(PassThruLo, PassThruHi) = DAG.
SplitVector(PassThru, dl);
2243 SDValue OpsLo[] = {Ch, PassThruLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
2245 OpsLo, MMO, IndexTy, ExtType);
2247 SDValue OpsHi[] = {Ch, PassThruHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
2249 OpsHi, MMO, IndexTy, ExtType);
2251 auto *VPGT = cast<VPGatherSDNode>(
N);
2253 std::tie(EVLLo, EVLHi) =
2254 DAG.
SplitEVL(VPGT->getVectorLength(), MemoryVT, dl);
2256 SDValue OpsLo[] = {Ch,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
2258 MMO, VPGT->getIndexType());
2260 SDValue OpsHi[] = {Ch,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
2262 MMO, VPGT->getIndexType());
2272 ReplaceValueWith(
SDValue(
N, 1), Ch);
2276 assert(
N->getValueType(0).isVector() &&
2277 N->getOperand(0).getValueType().isVector() &&
2278 "Operand types must be vectors");
2286 if (getTypeAction(
N->getOperand(0).getValueType()) ==
2288 GetSplitVector(
N->getOperand(0), LL, LH);
2292 if (getTypeAction(
N->getOperand(1).getValueType()) ==
2294 GetSplitVector(
N->getOperand(1), RL, RH);
2299 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2));
2300 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2));
2302 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
2303 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
2304 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
2305 std::tie(EVLLo, EVLHi) =
2306 DAG.
SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
2307 Lo = DAG.
getNode(
N->getOpcode(),
DL, LoVT, LL, RL,
N->getOperand(2), MaskLo,
2309 Hi = DAG.
getNode(
N->getOpcode(),
DL, HiVT, LH, RH,
N->getOperand(2), MaskHi,
2323 EVT InVT =
N->getOperand(0).getValueType();
2325 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2330 unsigned Opcode =
N->getOpcode();
2331 if (
N->getNumOperands() <= 2) {
2333 Lo = DAG.
getNode(Opcode, dl, LoVT,
Lo,
N->getOperand(1), Flags);
2334 Hi = DAG.
getNode(Opcode, dl, HiVT,
Hi,
N->getOperand(1), Flags);
2342 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
2343 assert(
N->isVPOpcode() &&
"Expected VP opcode");
2346 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2349 std::tie(EVLLo, EVLHi) =
2350 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2356void DAGTypeLegalizer::SplitVecRes_FFREXP(
SDNode *
N,
unsigned ResNo,
2364 EVT InVT =
N->getOperand(0).getValueType();
2366 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
2370 Lo = DAG.
getNode(
N->getOpcode(), dl, {LoVT, LoVT1},
Lo);
2371 Hi = DAG.
getNode(
N->getOpcode(), dl, {HiVT, HiVT1},
Hi);
2372 Lo->setFlags(
N->getFlags());
2373 Hi->setFlags(
N->getFlags());
2379 unsigned OtherNo = 1 - ResNo;
2380 EVT OtherVT =
N->getValueType(OtherNo);
2388 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
2395 EVT SrcVT =
N->getOperand(0).getValueType();
2396 EVT DestVT =
N->getValueType(0);
2419 EVT SplitLoVT, SplitHiVT;
2423 LLVM_DEBUG(
dbgs() <<
"Split vector extend via incremental extend:";
2424 N->dump(&DAG);
dbgs() <<
"\n");
2425 if (!
N->isVPOpcode()) {
2428 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0));
2439 DAG.
getNode(
N->getOpcode(), dl, NewSrcVT,
N->getOperand(0),
2440 N->getOperand(1),
N->getOperand(2));
2445 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
2448 std::tie(EVLLo, EVLHi) =
2449 DAG.
SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
2451 Lo = DAG.
getNode(
N->getOpcode(), dl, LoVT, {Lo, MaskLo, EVLLo});
2452 Hi = DAG.
getNode(
N->getOpcode(), dl, HiVT, {Hi, MaskHi, EVLHi});
2457 SplitVecRes_UnaryOp(
N,
Lo,
Hi);
2465 GetSplitVector(
N->getOperand(0), Inputs[0], Inputs[1]);
2466 GetSplitVector(
N->getOperand(1), Inputs[2], Inputs[3]);
2472 return N.getResNo() == 0 &&
2476 auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &
DL](
SDValue &Input1,
2481 "Expected build vector node.");
2484 for (
unsigned I = 0;
I < NewElts; ++
I) {
2489 Ops[
I] = Input2.getOperand(
Idx - NewElts);
2491 Ops[
I] = Input1.getOperand(
Idx);
2493 if (Ops[
I].getValueType().bitsGT(EltVT))
2496 return DAG.getBuildVector(NewVT,
DL, Ops);
2504 auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT,
this, NewElts,
2508 for (
unsigned Idx = 0;
Idx < std::size(Inputs); ++
Idx) {
2510 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.
getNode());
2519 for (
auto &
P : ShufflesIdxs) {
2520 if (
P.second.size() < 2)
2524 for (
int &
Idx : Mask) {
2527 unsigned SrcRegIdx =
Idx / NewElts;
2528 if (Inputs[SrcRegIdx].
isUndef()) {
2533 dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
2536 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2541 Idx = MaskElt % NewElts +
2542 P.second[Shuffle->getOperand(MaskElt / NewElts) ==
P.first.first
2548 Inputs[
P.second[0]] =
P.first.first;
2549 Inputs[
P.second[1]] =
P.first.second;
2552 ShufflesIdxs[std::make_pair(
P.first.second,
P.first.first)].clear();
2556 for (
int &
Idx : Mask) {
2559 unsigned SrcRegIdx =
Idx / NewElts;
2560 if (Inputs[SrcRegIdx].
isUndef()) {
2565 getTypeAction(Inputs[SrcRegIdx].getValueType());
2567 Inputs[SrcRegIdx].getNumOperands() == 2 &&
2568 !Inputs[SrcRegIdx].getOperand(1).
isUndef() &&
2571 UsedSubVector.set(2 * SrcRegIdx + (
Idx % NewElts) / (NewElts / 2));
2573 if (UsedSubVector.count() > 1) {
2575 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2576 if (UsedSubVector.test(2 *
I) == UsedSubVector.test(2 *
I + 1))
2578 if (Pairs.
empty() || Pairs.
back().size() == 2)
2580 if (UsedSubVector.test(2 *
I)) {
2581 Pairs.
back().emplace_back(
I, 0);
2583 assert(UsedSubVector.test(2 *
I + 1) &&
2584 "Expected to be used one of the subvectors.");
2585 Pairs.
back().emplace_back(
I, 1);
2588 if (!Pairs.
empty() && Pairs.
front().size() > 1) {
2590 for (
int &
Idx : Mask) {
2593 unsigned SrcRegIdx =
Idx / NewElts;
2595 Pairs, [SrcRegIdx](
ArrayRef<std::pair<unsigned, int>> Idxs) {
2596 return Idxs.front().first == SrcRegIdx ||
2597 Idxs.back().first == SrcRegIdx;
2599 if (It == Pairs.
end())
2601 Idx = It->front().first * NewElts + (
Idx % NewElts) % (NewElts / 2) +
2602 (SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
2605 for (
ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
2606 Inputs[Idxs.front().first] = DAG.
getNode(
2608 Inputs[Idxs.front().first].getValueType(),
2609 Inputs[Idxs.front().first].
getOperand(Idxs.front().second),
2610 Inputs[Idxs.back().first].
getOperand(Idxs.back().second));
2619 for (
unsigned I = 0;
I < std::size(Inputs); ++
I) {
2620 auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[
I].getNode());
2623 if (Shuffle->getOperand(0).getValueType() != NewVT)
2626 if (!Inputs[
I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
2627 !Shuffle->isSplat()) {
2629 }
else if (!Inputs[
I].hasOneUse() &&
2630 !Shuffle->getOperand(1).isUndef()) {
2632 for (
int &
Idx : Mask) {
2635 unsigned SrcRegIdx =
Idx / NewElts;
2638 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2643 int OpIdx = MaskElt / NewElts;
2656 for (
int OpIdx = 0; OpIdx < 2; ++OpIdx) {
2657 if (Shuffle->getOperand(OpIdx).isUndef())
2659 auto *It =
find(Inputs, Shuffle->getOperand(OpIdx));
2660 if (It == std::end(Inputs))
2662 int FoundOp = std::distance(std::begin(Inputs), It);
2665 for (
int &
Idx : Mask) {
2668 unsigned SrcRegIdx =
Idx / NewElts;
2671 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2676 int MaskIdx = MaskElt / NewElts;
2677 if (OpIdx == MaskIdx)
2678 Idx = MaskElt % NewElts + FoundOp * NewElts;
2681 Op = (OpIdx + 1) % 2;
2689 for (
int &
Idx : Mask) {
2692 unsigned SrcRegIdx =
Idx / NewElts;
2695 int MaskElt = Shuffle->getMaskElt(
Idx % NewElts);
2696 int OpIdx = MaskElt / NewElts;
2699 Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
2705 TryPeekThroughShufflesInputs(OrigMask);
2707 auto &&MakeUniqueInputs = [&Inputs, &
IsConstant,
2711 for (
const auto &
I : Inputs) {
2713 UniqueConstantInputs.
insert(
I);
2714 else if (!
I.isUndef())
2719 if (UniqueInputs.
size() != std::size(Inputs)) {
2720 auto &&UniqueVec = UniqueInputs.
takeVector();
2721 auto &&UniqueConstantVec = UniqueConstantInputs.
takeVector();
2722 unsigned ConstNum = UniqueConstantVec.size();
2723 for (
int &
Idx : Mask) {
2726 unsigned SrcRegIdx =
Idx / NewElts;
2727 if (Inputs[SrcRegIdx].
isUndef()) {
2731 const auto It =
find(UniqueConstantVec, Inputs[SrcRegIdx]);
2732 if (It != UniqueConstantVec.end()) {
2734 NewElts * std::distance(UniqueConstantVec.begin(), It);
2735 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2738 const auto RegIt =
find(UniqueVec, Inputs[SrcRegIdx]);
2739 assert(RegIt != UniqueVec.end() &&
"Cannot find non-const value.");
2741 NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
2742 assert(
Idx >= 0 &&
"Expected defined mask idx.");
2744 copy(UniqueConstantVec, std::begin(Inputs));
2745 copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
2748 MakeUniqueInputs(OrigMask);
2750 copy(Inputs, std::begin(OrigInputs));
2756 unsigned FirstMaskIdx =
High * NewElts;
2759 assert(!Output &&
"Expected default initialized initial value.");
2760 TryPeekThroughShufflesInputs(Mask);
2761 MakeUniqueInputs(Mask);
2763 copy(Inputs, std::begin(TmpInputs));
2766 bool SecondIteration =
false;
2767 auto &&AccumulateResults = [&UsedIdx, &SecondIteration](
unsigned Idx) {
2772 if (UsedIdx >= 0 &&
static_cast<unsigned>(UsedIdx) ==
Idx)
2773 SecondIteration =
true;
2774 return SecondIteration;
2777 Mask, std::size(Inputs), std::size(Inputs),
2779 [&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
2780 [&Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2783 Output = BuildVector(Inputs[
Idx], Inputs[
Idx], Mask);
2785 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[
Idx],
2786 DAG.getUNDEF(NewVT), Mask);
2787 Inputs[
Idx] = Output;
2789 [&AccumulateResults, &Output, &DAG = DAG, NewVT, &
DL, &Inputs,
2792 if (AccumulateResults(Idx1)) {
2795 Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
2797 Output = DAG.getVectorShuffle(NewVT,
DL, Inputs[Idx1],
2798 Inputs[Idx2], Mask);
2802 Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
2804 Output = DAG.getVectorShuffle(NewVT,
DL, TmpInputs[Idx1],
2805 TmpInputs[Idx2], Mask);
2807 Inputs[Idx1] = Output;
2809 copy(OrigInputs, std::begin(Inputs));
2814 EVT OVT =
N->getValueType(0);
2821 const Align Alignment =
2822 DAG.getDataLayout().getABITypeAlign(NVT.
getTypeForEVT(*DAG.getContext()));
2824 Lo = DAG.getVAArg(NVT, dl, Chain,
Ptr, SV, Alignment.
value());
2825 Hi = DAG.getVAArg(NVT, dl,
Lo.getValue(1),
Ptr, SV, Alignment.
value());
2826 Chain =
Hi.getValue(1);
2830 ReplaceValueWith(
SDValue(
N, 1), Chain);
2835 EVT DstVTLo, DstVTHi;
2836 std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(
N->getValueType(0));
2840 EVT SrcVT =
N->getOperand(0).getValueType();
2842 GetSplitVector(
N->getOperand(0), SrcLo, SrcHi);
2844 std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(
N, 0);
2846 Lo = DAG.getNode(
N->getOpcode(), dl, DstVTLo, SrcLo,
N->getOperand(1));
2847 Hi = DAG.getNode(
N->getOpcode(), dl, DstVTHi, SrcHi,
N->getOperand(1));
2853 GetSplitVector(
N->getOperand(0), InLo, InHi);
2862 EVT VT =
N->getValueType(0);
2866 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
2870 DAG.getVectorIdxConstant(0,
DL));
2878 EVT VT =
N->getValueType(0);
2885 Align Alignment = DAG.getReducedAlign(VT,
false);
2891 auto &MF = DAG.getMachineFunction();
2905 DAG.getConstant(1,
DL, PtrVT));
2907 DAG.getConstant(EltWidth,
DL, PtrVT));
2909 SDValue Stride = DAG.getConstant(-(int64_t)EltWidth,
DL, PtrVT);
2911 SDValue TrueMask = DAG.getBoolConstant(
true,
DL,
Mask.getValueType(), VT);
2912 SDValue Store = DAG.getStridedStoreVP(DAG.getEntryNode(),
DL, Val, StorePtr,
2913 DAG.getUNDEF(PtrVT), Stride, TrueMask,
2916 SDValue Load = DAG.getLoadVP(VT,
DL, Store, StackPtr, Mask, EVL, LoadMMO);
2918 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT);
2920 DAG.getVectorIdxConstant(0,
DL));
2926void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(
SDNode *
N) {
2928 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
2929 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2930 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
2934 DAG.getVTList(VT, VT), Op0Lo, Op0Hi);
2936 DAG.getVTList(VT, VT), Op1Lo, Op1Hi);
2942void DAGTypeLegalizer::SplitVecRes_VECTOR_INTERLEAVE(
SDNode *
N) {
2943 SDValue Op0Lo, Op0Hi, Op1Lo, Op1Hi;
2944 GetSplitVector(
N->getOperand(0), Op0Lo, Op0Hi);
2945 GetSplitVector(
N->getOperand(1), Op1Lo, Op1Hi);
2949 DAG.getVTList(VT, VT), Op0Lo, Op1Lo),
2951 DAG.getVTList(VT, VT), Op0Hi, Op1Hi)};
2953 SetSplitVector(
SDValue(
N, 0), Res[0].getValue(0), Res[0].getValue(1));
2954 SetSplitVector(
SDValue(
N, 1), Res[1].getValue(0), Res[1].getValue(1));
2965bool DAGTypeLegalizer::SplitVectorOperand(
SDNode *
N,
unsigned OpNo) {
2970 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
2973 switch (
N->getOpcode()) {
2976 dbgs() <<
"SplitVectorOperand Op #" << OpNo <<
": ";
2984 case ISD::SETCC: Res = SplitVecOp_VSETCC(
N);
break;
2990 case ISD::VP_TRUNCATE:
2992 Res = SplitVecOp_TruncateHelper(
N);
2995 case ISD::VP_FP_ROUND:
2999 Res = SplitVecOp_STORE(cast<StoreSDNode>(
N), OpNo);
3002 Res = SplitVecOp_VP_STORE(cast<VPStoreSDNode>(
N), OpNo);
3004 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
3005 Res = SplitVecOp_VP_STRIDED_STORE(cast<VPStridedStoreSDNode>(
N), OpNo);
3008 Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(
N), OpNo);
3011 case ISD::VP_SCATTER:
3012 Res = SplitVecOp_Scatter(cast<MemSDNode>(
N), OpNo);
3015 case ISD::VP_GATHER:
3016 Res = SplitVecOp_Gather(cast<MemSDNode>(
N), OpNo);
3019 Res = SplitVecOp_VSELECT(
N, OpNo);
3025 case ISD::VP_SINT_TO_FP:
3026 case ISD::VP_UINT_TO_FP:
3027 if (
N->getValueType(0).bitsLT(
3028 N->getOperand(
N->isStrictFPOpcode() ? 1 : 0).getValueType()))
3029 Res = SplitVecOp_TruncateHelper(
N);
3031 Res = SplitVecOp_UnaryOp(
N);
3035 Res = SplitVecOp_FP_TO_XINT_SAT(
N);
3039 case ISD::VP_FP_TO_SINT:
3040 case ISD::VP_FP_TO_UINT:
3051 Res = SplitVecOp_UnaryOp(
N);
3054 Res = SplitVecOp_FPOpDifferentTypes(
N);
3060 Res = SplitVecOp_ExtVecInRegOp(
N);
3078 Res = SplitVecOp_VECREDUCE(
N, OpNo);
3082 Res = SplitVecOp_VECREDUCE_SEQ(
N);
3084 case ISD::VP_REDUCE_FADD:
3085 case ISD::VP_REDUCE_SEQ_FADD:
3086 case ISD::VP_REDUCE_FMUL:
3087 case ISD::VP_REDUCE_SEQ_FMUL:
3088 case ISD::VP_REDUCE_ADD:
3089 case ISD::VP_REDUCE_MUL:
3090 case ISD::VP_REDUCE_AND:
3091 case ISD::VP_REDUCE_OR:
3092 case ISD::VP_REDUCE_XOR:
3093 case ISD::VP_REDUCE_SMAX:
3094 case ISD::VP_REDUCE_SMIN:
3095 case ISD::VP_REDUCE_UMAX:
3096 case ISD::VP_REDUCE_UMIN:
3097 case ISD::VP_REDUCE_FMAX:
3098 case ISD::VP_REDUCE_FMIN:
3099 Res = SplitVecOp_VP_REDUCE(
N, OpNo);
3101 case ISD::VP_CTTZ_ELTS:
3102 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
3103 Res = SplitVecOp_VP_CttzElements(
N);
3108 if (!Res.
getNode())
return false;
3115 if (
N->isStrictFPOpcode())
3117 "Invalid operand expansion");
3120 "Invalid operand expansion");
3122 ReplaceValueWith(
SDValue(
N, 0), Res);
3126SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(
SDNode *
N,
unsigned OpNo) {
3129 assert(OpNo == 0 &&
"Illegal operand must be mask");
3136 assert(
Mask.getValueType().isVector() &&
"VSELECT without a vector mask?");
3139 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3140 assert(
Lo.getValueType() ==
Hi.getValueType() &&
3141 "Lo and Hi have differing types");
3144 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(Src0VT);
3145 assert(LoOpVT == HiOpVT &&
"Asymmetric vector split?");
3147 SDValue LoOp0, HiOp0, LoOp1, HiOp1, LoMask, HiMask;
3148 std::tie(LoOp0, HiOp0) = DAG.SplitVector(Src0,
DL);
3149 std::tie(LoOp1, HiOp1) = DAG.SplitVector(Src1,
DL);
3150 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3160SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(
SDNode *
N,
unsigned OpNo) {
3161 EVT ResVT =
N->getValueType(0);
3165 SDValue VecOp =
N->getOperand(OpNo);
3167 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3168 GetSplitVector(VecOp,
Lo,
Hi);
3170 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3176 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
N->getFlags());
3180 EVT ResVT =
N->getValueType(0);
3189 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3190 GetSplitVector(VecOp,
Lo,
Hi);
3192 std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
3198 return DAG.getNode(
N->getOpcode(), dl, ResVT, Partial,
Hi, Flags);
3201SDValue DAGTypeLegalizer::SplitVecOp_VP_REDUCE(
SDNode *
N,
unsigned OpNo) {
3202 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3203 assert(OpNo == 1 &&
"Can only split reduce vector operand");
3205 unsigned Opc =
N->getOpcode();
3206 EVT ResVT =
N->getValueType(0);
3210 SDValue VecOp =
N->getOperand(OpNo);
3212 assert(VecVT.
isVector() &&
"Can only split reduce vector operand");
3213 GetSplitVector(VecOp,
Lo,
Hi);
3216 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(2));
3219 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(
N->getOperand(3), VecVT, dl);
3224 DAG.
getNode(Opc, dl, ResVT, {
N->getOperand(0),
Lo, MaskLo, EVLLo},
Flags);
3225 return DAG.getNode(Opc, dl, ResVT, {ResLo,
Hi, MaskHi, EVLHi},
Flags);
3230 EVT ResVT =
N->getValueType(0);
3233 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3234 EVT InVT =
Lo.getValueType();
3239 if (
N->isStrictFPOpcode()) {
3240 Lo = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3241 { N->getOperand(0), Lo });
3242 Hi = DAG.getNode(
N->getOpcode(), dl, { OutVT, MVT::Other },
3243 { N->getOperand(0), Hi });
3252 ReplaceValueWith(
SDValue(
N, 1), Ch);
3253 }
else if (
N->getNumOperands() == 3) {
3254 assert(
N->isVPOpcode() &&
"Expected VP opcode");
3255 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3256 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
3257 std::tie(EVLLo, EVLHi) =
3258 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0), dl);
3259 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo, MaskLo, EVLLo);
3260 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi, MaskHi, EVLHi);
3262 Lo = DAG.getNode(
N->getOpcode(), dl, OutVT,
Lo);
3263 Hi = DAG.getNode(
N->getOpcode(), dl, OutVT,
Hi);
3273 EVT ResVT =
N->getValueType(0);
3275 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3279 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(ResVT);
3285 Lo = BitConvertToInteger(
Lo);
3286 Hi = BitConvertToInteger(
Hi);
3288 if (DAG.getDataLayout().isBigEndian())
3296 assert(OpNo == 1 &&
"Invalid OpNo; can only split SubVec.");
3298 EVT ResVT =
N->getValueType(0);
3306 GetSplitVector(SubVec,
Lo,
Hi);
3309 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3315 DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
3317 return SecondInsertion;
3320SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
3322 EVT SubVT =
N->getValueType(0);
3327 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
3329 uint64_t LoEltsMin =
Lo.getValueType().getVectorMinNumElements();
3332 if (IdxVal < LoEltsMin) {
3334 "Extracted subvector crosses vector split!");
3337 N->getOperand(0).getValueType().isScalableVector())
3339 DAG.getVectorIdxConstant(IdxVal - LoEltsMin, dl));
3344 "Extracting scalable subvector from fixed-width unsupported");
3352 "subvector from a scalable predicate vector");
3358 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3360 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3361 auto &MF = DAG.getMachineFunction();
3365 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3372 SubVT, dl, Store, StackPtr,
3376SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
3385 GetSplitVector(Vec,
Lo,
Hi);
3387 uint64_t LoElts =
Lo.getValueType().getVectorMinNumElements();
3389 if (IdxVal < LoElts)
3393 DAG.getConstant(IdxVal - LoElts,
SDLoc(
N),
3394 Idx.getValueType())), 0);
3398 if (CustomLowerNode(
N,
N->getValueType(0),
true))
3414 Align SmallestAlign = DAG.getReducedAlign(VecVT,
false);
3416 DAG.CreateStackTemporary(VecVT.
getStoreSize(), SmallestAlign);
3417 auto &MF = DAG.getMachineFunction();
3420 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
3428 if (
N->getValueType(0).bitsLT(EltVT)) {
3429 SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
3431 return DAG.getZExtOrTrunc(Load, dl,
N->getValueType(0));
3434 return DAG.getExtLoad(
3445 SplitVecRes_ExtVecInRegOp(
N,
Lo,
Hi);
3453 SplitVecRes_Gather(
N,
Lo,
Hi);
3456 ReplaceValueWith(
SDValue(
N, 0), Res);
3461 assert(
N->isUnindexed() &&
"Indexed vp_store of vector?");
3465 assert(
Offset.isUndef() &&
"Unexpected VP store offset");
3467 SDValue EVL =
N->getVectorLength();
3469 Align Alignment =
N->getOriginalAlign();
3475 GetSplitVector(
Data, DataLo, DataHi);
3477 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3482 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3485 GetSplitVector(Mask, MaskLo, MaskHi);
3487 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3490 EVT MemoryVT =
N->getMemoryVT();
3491 EVT LoMemVT, HiMemVT;
3492 bool HiIsEmpty =
false;
3493 std::tie(LoMemVT, HiMemVT) =
3494 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3498 std::tie(EVLLo, EVLHi) = DAG.SplitEVL(EVL,
Data.getValueType(),
DL);
3506 Lo = DAG.getStoreVP(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, EVLLo, LoMemVT, MMO,
3507 N->getAddressingMode(),
N->isTruncatingStore(),
3508 N->isCompressingStore());
3515 N->isCompressingStore());
3523 MPI =
N->getPointerInfo().getWithOffset(
3526 MMO = DAG.getMachineFunction().getMachineMemOperand(
3528 Alignment,
N->getAAInfo(),
N->getRanges());
3530 Hi = DAG.getStoreVP(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, EVLHi, HiMemVT, MMO,
3531 N->getAddressingMode(),
N->isTruncatingStore(),
3532 N->isCompressingStore());
3541 assert(
N->isUnindexed() &&
"Indexed vp_strided_store of a vector?");
3542 assert(
N->getOffset().isUndef() &&
"Unexpected VP strided store offset");
3549 GetSplitVector(
Data, LoData, HiData);
3551 std::tie(LoData, HiData) = DAG.SplitVector(
Data,
DL);
3553 EVT LoMemVT, HiMemVT;
3554 bool HiIsEmpty =
false;
3555 std::tie(LoMemVT, HiMemVT) = DAG.GetDependentSplitDestVTs(
3561 SplitVecRes_SETCC(
Mask.getNode(), LoMask, HiMask);
3562 else if (getTypeAction(
Mask.getValueType()) ==
3564 GetSplitVector(Mask, LoMask, HiMask);
3566 std::tie(LoMask, HiMask) = DAG.SplitVector(Mask,
DL);
3569 std::tie(LoEVL, HiEVL) =
3570 DAG.SplitEVL(
N->getVectorLength(),
Data.getValueType(),
DL);
3574 N->getChain(),
DL, LoData,
N->getBasePtr(),
N->getOffset(),
3575 N->getStride(), LoMask, LoEVL, LoMemVT,
N->getMemOperand(),
3576 N->getAddressingMode(),
N->isTruncatingStore(),
N->isCompressingStore());
3587 EVT PtrVT =
N->getBasePtr().getValueType();
3590 DAG.getSExtOrTrunc(
N->getStride(),
DL, PtrVT));
3593 Align Alignment =
N->getOriginalAlign();
3601 Alignment,
N->getAAInfo(),
N->getRanges());
3604 N->getChain(),
DL, HiData,
Ptr,
N->getOffset(),
N->getStride(), HiMask,
3605 HiEVL, HiMemVT, MMO,
N->getAddressingMode(),
N->isTruncatingStore(),
3606 N->isCompressingStore());
3615 assert(
N->isUnindexed() &&
"Indexed masked store of vector?");
3619 assert(
Offset.isUndef() &&
"Unexpected indexed masked store offset");
3622 Align Alignment =
N->getOriginalAlign();
3628 GetSplitVector(
Data, DataLo, DataHi);
3630 std::tie(DataLo, DataHi) = DAG.SplitVector(
Data,
DL);
3635 SplitVecRes_SETCC(
Mask.getNode(), MaskLo, MaskHi);
3638 GetSplitVector(Mask, MaskLo, MaskHi);
3640 std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask,
DL);
3643 EVT MemoryVT =
N->getMemoryVT();
3644 EVT LoMemVT, HiMemVT;
3645 bool HiIsEmpty =
false;
3646 std::tie(LoMemVT, HiMemVT) =
3647 DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.
getValueType(), &HiIsEmpty);
3655 Lo = DAG.getMaskedStore(Ch,
DL, DataLo,
Ptr,
Offset, MaskLo, LoMemVT, MMO,
3656 N->getAddressingMode(),
N->isTruncatingStore(),
3657 N->isCompressingStore());
3666 N->isCompressingStore());
3674 MPI =
N->getPointerInfo().getWithOffset(
3677 MMO = DAG.getMachineFunction().getMachineMemOperand(
3679 Alignment,
N->getAAInfo(),
N->getRanges());
3681 Hi = DAG.getMaskedStore(Ch,
DL, DataHi,
Ptr,
Offset, MaskHi, HiMemVT, MMO,
3682 N->getAddressingMode(),
N->isTruncatingStore(),
3683 N->isCompressingStore());
3696 EVT MemoryVT =
N->getMemoryVT();
3697 Align Alignment =
N->getOriginalAlign();
3705 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3706 return {MSC->getMask(), MSC->getIndex(), MSC->getScale(),
3709 auto *VPSC = cast<VPScatterSDNode>(
N);
3710 return {VPSC->getMask(), VPSC->getIndex(), VPSC->getScale(),
3715 EVT LoMemVT, HiMemVT;
3716 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3721 GetSplitVector(Ops.Data, DataLo, DataHi);
3723 std::tie(DataLo, DataHi) = DAG.SplitVector(Ops.Data,
DL);
3727 if (OpNo == 1 && Ops.Mask.getOpcode() ==
ISD::SETCC) {
3728 SplitVecRes_SETCC(Ops.Mask.getNode(), MaskLo, MaskHi);
3730 std::tie(MaskLo, MaskHi) = SplitMask(Ops.Mask,
DL);
3734 if (getTypeAction(Ops.Index.getValueType()) ==
3736 GetSplitVector(Ops.Index, IndexLo, IndexHi);
3738 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Ops.Index,
DL);
3746 if (
auto *MSC = dyn_cast<MaskedScatterSDNode>(
N)) {
3747 SDValue OpsLo[] = {Ch, DataLo, MaskLo,
Ptr, IndexLo, Ops.Scale};
3749 DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3750 MSC->getIndexType(), MSC->isTruncatingStore());
3755 SDValue OpsHi[] = {
Lo, DataHi, MaskHi,
Ptr, IndexHi, Ops.Scale};
3756 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi,
3757 MMO, MSC->getIndexType(),
3758 MSC->isTruncatingStore());
3760 auto *VPSC = cast<VPScatterSDNode>(
N);
3762 std::tie(EVLLo, EVLHi) =
3763 DAG.SplitEVL(VPSC->getVectorLength(), Ops.Data.getValueType(),
DL);
3765 SDValue OpsLo[] = {Ch, DataLo,
Ptr, IndexLo, Ops.Scale, MaskLo, EVLLo};
3766 Lo = DAG.getScatterVP(DAG.getVTList(MVT::Other), LoMemVT,
DL, OpsLo, MMO,
3767 VPSC->getIndexType());
3772 SDValue OpsHi[] = {
Lo, DataHi,
Ptr, IndexHi, Ops.Scale, MaskHi, EVLHi};
3773 return DAG.getScatterVP(DAG.getVTList(MVT::Other), HiMemVT,
DL, OpsHi, MMO,
3774 VPSC->getIndexType());
3778 assert(
N->isUnindexed() &&
"Indexed store of vector?");
3779 assert(OpNo == 1 &&
"Can only split the stored value");
3782 bool isTruncating =
N->isTruncatingStore();
3785 EVT MemoryVT =
N->getMemoryVT();
3786 Align Alignment =
N->getOriginalAlign();
3790 GetSplitVector(
N->getOperand(1),
Lo,
Hi);
3792 EVT LoMemVT, HiMemVT;
3793 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
3800 Lo = DAG.getTruncStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), LoMemVT,
3801 Alignment, MMOFlags, AAInfo);
3803 Lo = DAG.getStore(Ch,
DL,
Lo,
Ptr,
N->getPointerInfo(), Alignment, MMOFlags,
3807 IncrementPointer(
N, LoMemVT, MPI,
Ptr);
3810 Hi = DAG.getTruncStore(Ch,
DL,
Hi,
Ptr, MPI,
3811 HiMemVT, Alignment, MMOFlags, AAInfo);
3813 Hi = DAG.getStore(Ch,
DL,
Hi,
Ptr, MPI, Alignment, MMOFlags, AAInfo);
3827 EVT EltVT =
N->getValueType(0).getVectorElementType();
3829 for (
unsigned i = 0, e =
Op.getValueType().getVectorNumElements();
3832 DAG.getVectorIdxConstant(i,
DL)));
3836 return DAG.getBuildVector(
N->getValueType(0),
DL, Elts);
3857 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
3858 SDValue InVec =
N->getOperand(OpNo);
3860 EVT OutVT =
N->getValueType(0);
3868 EVT LoOutVT, HiOutVT;
3869 std::tie(LoOutVT, HiOutVT) = DAG.GetSplitDestVTs(OutVT);
3870 assert(LoOutVT == HiOutVT &&
"Unequal split?");
3875 if (isTypeLegal(LoOutVT) ||
3876 InElementSize <= OutElementSize * 2)
3877 return SplitVecOp_UnaryOp(
N);
3886 return SplitVecOp_UnaryOp(
N);
3890 GetSplitVector(InVec, InLoVec, InHiVec);
3896 EVT HalfElementVT = IsFloat ?
3898 EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
3905 if (
N->isStrictFPOpcode()) {
3906 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3907 {N->getOperand(0), InLoVec});
3908 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, {HalfVT, MVT::Other},
3909 {N->getOperand(0), InHiVec});
3915 HalfLo = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InLoVec);
3916 HalfHi = DAG.
getNode(
N->getOpcode(),
DL, HalfVT, InHiVec);
3928 if (
N->isStrictFPOpcode()) {
3932 DAG.getTargetConstant(0,
DL, TLI.
getPointerTy(DAG.getDataLayout()))});
3940 DAG.getTargetConstant(
3946 assert(
N->getValueType(0).isVector() &&
3947 N->getOperand(0).getValueType().isVector() &&
3948 "Operand types must be vectors");
3950 SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
3952 GetSplitVector(
N->getOperand(0), Lo0, Hi0);
3953 GetSplitVector(
N->getOperand(1), Lo1, Hi1);
3964 assert(
N->getOpcode() == ISD::VP_SETCC &&
"Expected VP_SETCC opcode");
3965 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
3966 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(3));
3967 std::tie(EVLLo, EVLHi) =
3968 DAG.SplitEVL(
N->getOperand(4),
N->getValueType(0),
DL);
3969 LoRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Lo0, Lo1,
3970 N->getOperand(2), MaskLo, EVLLo);
3971 HiRes = DAG.
getNode(ISD::VP_SETCC,
DL, PartResVT, Hi0, Hi1,
3972 N->getOperand(2), MaskHi, EVLHi);
3976 EVT OpVT =
N->getOperand(0).getValueType();
3979 return DAG.getNode(ExtendCode,
DL,
N->getValueType(0), Con);
3985 EVT ResVT =
N->getValueType(0);
3988 GetSplitVector(
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0),
Lo,
Hi);
3989 EVT InVT =
Lo.getValueType();
3994 if (
N->isStrictFPOpcode()) {
3995 Lo = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
3996 { N->getOperand(0), Lo, N->getOperand(2) });
3997 Hi = DAG.getNode(
N->getOpcode(),
DL, { OutVT, MVT::Other },
3998 { N->getOperand(0), Hi, N->getOperand(2) });
4002 Lo.getValue(1),
Hi.getValue(1));
4003 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4004 }
else if (
N->getOpcode() == ISD::VP_FP_ROUND) {
4005 SDValue MaskLo, MaskHi, EVLLo, EVLHi;
4006 std::tie(MaskLo, MaskHi) = SplitMask(
N->getOperand(1));
4007 std::tie(EVLLo, EVLHi) =
4008 DAG.SplitEVL(
N->getOperand(2),
N->getValueType(0),
DL);
4009 Lo = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Lo, MaskLo, EVLLo);
4010 Hi = DAG.getNode(ISD::VP_FP_ROUND,
DL, OutVT,
Hi, MaskHi, EVLHi);
4024SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(
SDNode *
N) {
4027 EVT LHSLoVT, LHSHiVT;
4028 std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(
N->getValueType(0));
4030 if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
4031 return DAG.UnrollVectorOp(
N,
N->getValueType(0).getVectorNumElements());
4034 std::tie(LHSLo, LHSHi) =
4035 DAG.SplitVector(
N->getOperand(0),
DL, LHSLoVT, LHSHiVT);
4038 std::tie(RHSLo, RHSHi) = DAG.SplitVector(
N->getOperand(1),
DL);
4040 SDValue Lo = DAG.getNode(
N->getOpcode(),
DL, LHSLoVT, LHSLo, RHSLo);
4041 SDValue Hi = DAG.getNode(
N->getOpcode(),
DL, LHSHiVT, LHSHi, RHSHi);
4047 EVT ResVT =
N->getValueType(0);
4050 GetSplitVector(
N->getOperand(0),
Lo,
Hi);
4051 EVT InVT =
Lo.getValueType();
4057 Lo = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Lo,
N->getOperand(1));
4058 Hi = DAG.getNode(
N->getOpcode(), dl, NewResVT,
Hi,
N->getOperand(1));
4065 EVT ResVT =
N->getValueType(0);
4069 GetSplitVector(VecOp,
Lo,
Hi);
4071 auto [MaskLo, MaskHi] = SplitMask(
N->getOperand(1));
4072 auto [EVLLo, EVLHi] =
4074 SDValue VLo = DAG.getZExtOrTrunc(EVLLo,
DL, ResVT);
4080 DAG.getSetCC(
DL, getSetCCResultType(ResVT), ResLo, VLo,
ISD::SETNE);
4082 return DAG.getSelect(
DL, ResVT, ResLoNotEVL, ResLo,
4083 DAG.getNode(
ISD::ADD,
DL, ResVT, VLo, ResHi));
4090void DAGTypeLegalizer::WidenVectorResult(
SDNode *
N,
unsigned ResNo) {
4091 LLVM_DEBUG(
dbgs() <<
"Widen node result " << ResNo <<
": ";
N->dump(&DAG));
4094 if (CustomWidenLowerNode(
N,
N->getValueType(ResNo)))
4099 auto unrollExpandedOp = [&]() {
4104 EVT VT =
N->getValueType(0);
4114 switch (
N->getOpcode()) {
4117 dbgs() <<
"WidenVectorResult #" << ResNo <<
": ";
4129 Res = WidenVecRes_INSERT_SUBVECTOR(
N);
4133 case ISD::LOAD: Res = WidenVecRes_LOAD(
N);
break;
4137 Res = WidenVecRes_ScalarOp(
N);
4142 case ISD::VP_SELECT:
4144 Res = WidenVecRes_Select(
N);
4148 case ISD::SETCC: Res = WidenVecRes_SETCC(
N);
break;
4149 case ISD::UNDEF: Res = WidenVecRes_UNDEF(
N);
break;
4151 Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(
N));
4154 Res = WidenVecRes_VP_LOAD(cast<VPLoadSDNode>(
N));
4156 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
4157 Res = WidenVecRes_VP_STRIDED_LOAD(cast<VPStridedLoadSDNode>(
N));
4160 Res = WidenVecRes_MLOAD(cast<MaskedLoadSDNode>(
N));
4163 Res = WidenVecRes_MGATHER(cast<MaskedGatherSDNode>(
N));
4165 case ISD::VP_GATHER:
4166 Res = WidenVecRes_VP_GATHER(cast<VPGatherSDNode>(
N));
4169 Res = WidenVecRes_VECTOR_REVERSE(
N);
4177 case ISD::OR:
case ISD::VP_OR:
4186 case ISD::VP_FMINIMUM:
4188 case ISD::VP_FMAXIMUM:
4219 case ISD::VP_FCOPYSIGN:
4220 Res = WidenVecRes_Binary(
N);
4225 if (unrollExpandedOp())
4240 Res = WidenVecRes_BinaryCanTrap(
N);
4249 Res = WidenVecRes_BinaryWithExtraScalarOp(
N);
4252#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
4253 case ISD::STRICT_##DAGN:
4254#include "llvm/IR/ConstrainedOps.def"
4255 Res = WidenVecRes_StrictFP(
N);
4264 Res = WidenVecRes_OverflowOp(
N, ResNo);
4268 Res = WidenVecRes_FCOPYSIGN(
N);
4273 Res = WidenVecRes_UnarySameEltsWithScalarArg(
N);
4278 if (!unrollExpandedOp())
4279 Res = WidenVecRes_ExpOp(
N);
4285 Res = WidenVecRes_EXTEND_VECTOR_INREG(
N);
4290 case ISD::VP_FP_EXTEND:
4292 case ISD::VP_FP_ROUND:
4294 case ISD::VP_FP_TO_SINT:
4296 case ISD::VP_FP_TO_UINT:
4298 case ISD::VP_SIGN_EXTEND:
4300 case ISD::VP_SINT_TO_FP:
4301 case ISD::VP_TRUNCATE:
4304 case ISD::VP_UINT_TO_FP:
4306 case ISD::VP_ZERO_EXTEND:
4307 Res = WidenVecRes_Convert(
N);
4312 Res = WidenVecRes_FP_TO_XINT_SAT(
N);
4318 case ISD::VP_LLRINT:
4319 Res = WidenVecRes_XRINT(
N);
4339 if (unrollExpandedOp())
4349 case ISD::VP_BITREVERSE:
4355 case ISD::VP_CTLZ_ZERO_UNDEF:
4361 case ISD::VP_CTTZ_ZERO_UNDEF:
4366 case ISD::VP_FFLOOR:
4368 case ISD::VP_FNEARBYINT:
4369 case ISD::VP_FROUND:
4370 case ISD::VP_FROUNDEVEN:
4371 case ISD::VP_FROUNDTOZERO:
4375 Res = WidenVecRes_Unary(
N);
4382 Res = WidenVecRes_Ternary(
N);
4388 SetWidenedVector(
SDValue(
N, ResNo), Res);
4395 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4396 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4397 SDValue InOp3 = GetWidenedVector(
N->getOperand(2));
4398 if (
N->getNumOperands() == 3)
4399 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
4401 assert(
N->getNumOperands() == 5 &&
"Unexpected number of operands!");
4402 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4406 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4407 {InOp1, InOp2, InOp3, Mask, N->getOperand(4)});
4414 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4415 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4416 if (
N->getNumOperands() == 2)
4417 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2,
4420 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands!");
4421 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4425 return DAG.getNode(
N->getOpcode(), dl, WidenVT,
4426 {InOp1, InOp2, Mask, N->getOperand(3)},
N->getFlags());
4429SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(
SDNode *
N) {
4433 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4434 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4436 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3,
4445 unsigned ConcatEnd,
EVT VT,
EVT MaxVT,
4448 if (ConcatEnd == 1) {
4449 VT = ConcatOps[0].getValueType();
4451 return ConcatOps[0];
4454 SDLoc dl(ConcatOps[0]);
4461 while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
4462 int Idx = ConcatEnd - 1;
4463 VT = ConcatOps[
Idx--].getValueType();
4464 while (
Idx >= 0 && ConcatOps[
Idx].getValueType() == VT)
4477 unsigned NumToInsert = ConcatEnd -
Idx - 1;
4478 for (
unsigned i = 0, OpIdx =
Idx+1; i < NumToInsert; i++, OpIdx++) {
4482 ConcatOps[
Idx+1] = VecOp;
4483 ConcatEnd =
Idx + 2;
4489 unsigned RealVals = ConcatEnd -
Idx - 1;
4490 unsigned SubConcatEnd = 0;
4491 unsigned SubConcatIdx =
Idx + 1;
4492 while (SubConcatEnd < RealVals)
4493 SubConcatOps[SubConcatEnd++] = ConcatOps[++
Idx];
4494 while (SubConcatEnd < OpsToConcat)
4495 SubConcatOps[SubConcatEnd++] = undefVec;
4497 NextVT, SubConcatOps);
4498 ConcatEnd = SubConcatIdx + 1;
4503 if (ConcatEnd == 1) {
4504 VT = ConcatOps[0].getValueType();
4506 return ConcatOps[0];
4511 if (NumOps != ConcatEnd ) {
4513 for (
unsigned j = ConcatEnd; j < NumOps; ++j)
4514 ConcatOps[j] = UndefVal;
4522 unsigned Opcode =
N->getOpcode();
4530 NumElts = NumElts / 2;
4534 if (NumElts != 1 && !TLI.
canOpTrap(
N->getOpcode(), VT)) {
4536 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4537 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4538 return DAG.getNode(
N->getOpcode(), dl, WidenVT, InOp1, InOp2, Flags);
4550 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
4551 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
4552 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4555 unsigned ConcatEnd = 0;
4563 while (CurNumElts != 0) {
4564 while (CurNumElts >= NumElts) {
4566 DAG.getVectorIdxConstant(
Idx, dl));
4568 DAG.getVectorIdxConstant(
Idx, dl));
4569 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2, Flags);
4571 CurNumElts -= NumElts;
4574 NumElts = NumElts / 2;
4579 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4581 InOp1, DAG.getVectorIdxConstant(
Idx, dl));
4583 InOp2, DAG.getVectorIdxConstant(
Idx, dl));
4584 ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
4595 switch (
N->getOpcode()) {
4598 return WidenVecRes_STRICT_FSETCC(
N);
4605 return WidenVecRes_Convert_StrictFP(
N);
4611 unsigned NumOpers =
N->getNumOperands();
4612 unsigned Opcode =
N->getOpcode();
4619 NumElts = NumElts / 2;
4630 unsigned CurNumElts =
N->getValueType(0).getVectorNumElements();
4634 unsigned ConcatEnd = 0;
4641 for (
unsigned i = 1; i < NumOpers; ++i) {
4647 Oper = GetWidenedVector(Oper);
4653 DAG.getUNDEF(WideOpVT), Oper,
4654 DAG.getVectorIdxConstant(0, dl));
4666 while (CurNumElts != 0) {
4667 while (CurNumElts >= NumElts) {
4670 for (
unsigned i = 0; i < NumOpers; ++i) {
4673 EVT OpVT =
Op.getValueType();
4679 DAG.getVectorIdxConstant(
Idx, dl));
4685 EVT OperVT[] = {VT, MVT::Other};
4687 ConcatOps[ConcatEnd++] = Oper;
4690 CurNumElts -= NumElts;
4693 NumElts = NumElts / 2;
4698 for (
unsigned i = 0; i != CurNumElts; ++i, ++
Idx) {
4701 for (
unsigned i = 0; i < NumOpers; ++i) {
4704 EVT OpVT =
Op.getValueType();
4708 DAG.getVectorIdxConstant(
Idx, dl));
4713 EVT WidenVT[] = {WidenEltVT, MVT::Other};
4715 ConcatOps[ConcatEnd++] = Oper;
4724 if (Chains.
size() == 1)
4725 NewChain = Chains[0];
4728 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4733SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(
SDNode *
N,
unsigned ResNo) {
4735 EVT ResVT =
N->getValueType(0);
4736 EVT OvVT =
N->getValueType(1);
4737 EVT WideResVT, WideOvVT;
4747 WideLHS = GetWidenedVector(
N->getOperand(0));
4748 WideRHS = GetWidenedVector(
N->getOperand(1));
4758 N->getOperand(0), Zero);
4761 N->getOperand(1), Zero);
4764 SDVTList WideVTs = DAG.getVTList(WideResVT, WideOvVT);
4765 SDNode *WideNode = DAG.getNode(
4766 N->getOpcode(),
DL, WideVTs, WideLHS, WideRHS).getNode();
4769 unsigned OtherNo = 1 - ResNo;
4770 EVT OtherVT =
N->getValueType(OtherNo);
4777 ReplaceValueWith(
SDValue(
N, OtherNo), OtherVal);
4780 return SDValue(WideNode, ResNo);
4793 unsigned Opcode =
N->getOpcode();
4802 InOp = ZExtPromotedInteger(InOp);
4813 InOp = GetWidenedVector(
N->getOperand(0));
4816 if (InVTEC == WidenEC) {
4817 if (
N->getNumOperands() == 1)
4818 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
4819 if (
N->getNumOperands() == 3) {
4820 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4823 return DAG.getNode(Opcode,
DL, WidenVT, InOp, Mask,
N->getOperand(2));
4825 return DAG.getNode(Opcode,
DL, WidenVT, InOp,
N->getOperand(1), Flags);
4848 unsigned NumConcat =
4853 if (
N->getNumOperands() == 1)
4854 return DAG.getNode(Opcode,
DL, WidenVT, InVec);
4855 return DAG.getNode(Opcode,
DL, WidenVT, InVec,
N->getOperand(1), Flags);
4860 DAG.getVectorIdxConstant(0,
DL));
4862 if (
N->getNumOperands() == 1)
4863 return DAG.getNode(Opcode,
DL, WidenVT, InVal);
4864 return DAG.getNode(Opcode,
DL, WidenVT, InVal,
N->getOperand(1), Flags);
4873 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
4874 for (
unsigned i=0; i < MinElts; ++i) {
4876 DAG.getVectorIdxConstant(i,
DL));
4877 if (
N->getNumOperands() == 1)
4878 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val);
4880 Ops[i] = DAG.getNode(Opcode,
DL, EltVT, Val,
N->getOperand(1), Flags);
4883 return DAG.getBuildVector(WidenVT,
DL, Ops);
4892 EVT SrcVT = Src.getValueType();
4896 Src = GetWidenedVector(Src);
4897 SrcVT = Src.getValueType();
4904 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src,
N->getOperand(1));
4913 EVT SrcVT = Src.getValueType();
4917 Src = GetWidenedVector(Src);
4918 SrcVT = Src.getValueType();
4925 if (
N->getNumOperands() == 1)
4926 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src);
4928 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
4929 assert(
N->isVPOpcode() &&
"Expected VP opcode");
4933 return DAG.getNode(
N->getOpcode(), dl, WidenVT, Src, Mask,
N->getOperand(2));
4936SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(
SDNode *
N) {
4947 unsigned Opcode =
N->getOpcode();
4953 std::array<EVT, 2> EltVTs = {{EltVT, MVT::Other}};
4958 unsigned MinElts =
N->getValueType(0).getVectorNumElements();
4959 for (
unsigned i=0; i < MinElts; ++i) {
4961 DAG.getVectorIdxConstant(i,
DL));
4962 Ops[i] = DAG.getNode(Opcode,
DL, EltVTs, NewOps);
4966 ReplaceValueWith(
SDValue(
N, 1), NewChain);
4968 return DAG.getBuildVector(WidenVT,
DL, Ops);
4971SDValue DAGTypeLegalizer::WidenVecRes_EXTEND_VECTOR_INREG(
SDNode *
N) {
4972 unsigned Opcode =
N->getOpcode();
4985 InOp = GetWidenedVector(InOp);
4992 return DAG.getNode(Opcode,
DL, WidenVT, InOp);
4999 for (
unsigned i = 0, e = std::min(InVTNumElts, WidenNumElts); i !=
e; ++i) {
5001 DAG.getVectorIdxConstant(i,
DL));
5018 while (Ops.
size() != WidenNumElts)
5021 return DAG.getBuildVector(WidenVT,
DL, Ops);
5027 if (
N->getOperand(0).getValueType() ==
N->getOperand(1).getValueType())
5028 return WidenVecRes_BinaryCanTrap(
N);
5038SDValue DAGTypeLegalizer::WidenVecRes_UnarySameEltsWithScalarArg(
SDNode *
N) {
5039 SDValue FpValue =
N->getOperand(0);
5043 SDValue Arg = GetWidenedVector(FpValue);
5044 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, {Arg,
N->getOperand(1)},
5050 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5052 SDValue ExpOp =
RHS.getValueType().isVector() ? GetWidenedVector(RHS) :
RHS;
5054 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp, ExpOp);
5060 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5061 if (
N->getNumOperands() == 1)
5062 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT, InOp,
N->getFlags());
5064 assert(
N->getNumOperands() == 3 &&
"Unexpected number of operands!");
5065 assert(
N->isVPOpcode() &&
"Expected VP opcode");
5069 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
5070 {InOp,
Mask,
N->getOperand(2)});
5076 cast<VTSDNode>(
N->getOperand(1))->getVT()
5077 .getVectorElementType(),
5079 SDValue WidenLHS = GetWidenedVector(
N->getOperand(0));
5080 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
5081 WidenVT, WidenLHS, DAG.getValueType(ExtVT));
5084SDValue DAGTypeLegalizer::WidenVecRes_MERGE_VALUES(
SDNode *
N,
unsigned ResNo) {
5085 SDValue WidenVec = DisintegrateMERGE_VALUES(
N, ResNo);
5086 return GetWidenedVector(WidenVec);
5092 EVT VT =
N->getValueType(0);
5096 switch (getTypeAction(InVT)) {
5110 SDValue NInOp = GetPromotedInteger(InOp);
5112 if (WidenVT.
bitsEq(NInVT)) {
5115 if (DAG.getDataLayout().isBigEndian()) {
5120 DAG.getConstant(ShiftAmt, dl, ShiftAmtTy));
5139 InOp = GetWidenedVector(InOp);
5141 if (WidenVT.
bitsEq(InVT))
5151 if (WidenSize % InScalarSize == 0 && InVT != MVT::x86mmx) {
5156 unsigned NewNumParts = WidenSize / InSize;
5169 EVT OrigInVT =
N->getOperand(0).getValueType();
5182 if (WidenSize % InSize == 0) {
5189 DAG.ExtractVectorElements(InOp, Ops);
5190 Ops.
append(WidenSize / InScalarSize - Ops.
size(),
5202 return CreateStackStoreLoad(InOp, WidenVT);
5208 EVT VT =
N->getValueType(0);
5212 EVT EltVT =
N->getOperand(0).getValueType();
5219 assert(WidenNumElts >= NumElts &&
"Shrinking vector instead of widening!");
5220 NewOps.append(WidenNumElts - NumElts, DAG.getUNDEF(EltVT));
5222 return DAG.getBuildVector(WidenVT, dl, NewOps);
5226 EVT InVT =
N->getOperand(0).getValueType();
5229 unsigned NumOperands =
N->getNumOperands();
5231 bool InputWidened =
false;
5235 if (WidenNumElts % NumInElts == 0) {
5237 unsigned NumConcat = WidenNumElts / NumInElts;
5238 SDValue UndefVal = DAG.getUNDEF(InVT);
5240 for (
unsigned i=0; i < NumOperands; ++i)
5241 Ops[i] =
N->getOperand(i);
5242 for (
unsigned i = NumOperands; i != NumConcat; ++i)
5247 InputWidened =
true;
5251 for (i=1; i < NumOperands; ++i)
5252 if (!
N->getOperand(i).isUndef())
5255 if (i == NumOperands)
5258 return GetWidenedVector(
N->getOperand(0));
5260 if (NumOperands == 2) {
5262 "Cannot use vector shuffles to widen CONCAT_VECTOR result");
5268 for (
unsigned i = 0; i < NumInElts; ++i) {
5270 MaskOps[i + NumInElts] = i + WidenNumElts;
5272 return DAG.getVectorShuffle(WidenVT, dl,
5273 GetWidenedVector(
N->getOperand(0)),
5274 GetWidenedVector(
N->getOperand(1)),
5281 "Cannot use build vectors to widen CONCAT_VECTOR result");
5289 for (
unsigned i=0; i < NumOperands; ++i) {
5292 InOp = GetWidenedVector(InOp);
5293 for (
unsigned j = 0;
j < NumInElts; ++
j)
5295 DAG.getVectorIdxConstant(j, dl));
5297 SDValue UndefVal = DAG.getUNDEF(EltVT);
5298 for (;
Idx < WidenNumElts; ++
Idx)
5299 Ops[
Idx] = UndefVal;
5300 return DAG.getBuildVector(WidenVT, dl, Ops);
5303SDValue DAGTypeLegalizer::WidenVecRes_INSERT_SUBVECTOR(
SDNode *
N) {
5304 EVT VT =
N->getValueType(0);
5306 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5313SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(
SDNode *
N) {
5314 EVT VT =
N->getValueType(0);
5321 auto InOpTypeAction = getTypeAction(InOp.
getValueType());
5323 InOp = GetWidenedVector(InOp);
5329 if (IdxVal == 0 && InVT == WidenVT)
5336 assert(IdxVal % VTNumElts == 0 &&
5337 "Expected Idx to be a multiple of subvector minimum vector length");
5338 if (IdxVal % WidenNumElts == 0 && IdxVal + WidenNumElts < InNumElts)
5351 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5352 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5353 "down type's element count");
5360 for (;
I < VTNumElts / GCD; ++
I)
5363 DAG.getVectorIdxConstant(IdxVal +
I * GCD, dl)));
5364 for (;
I < WidenNumElts / GCD; ++
I)
5371 "EXTRACT_SUBVECTOR for scalable vectors");
5378 for (i = 0; i < VTNumElts; ++i)
5380 DAG.getVectorIdxConstant(IdxVal + i, dl));
5382 SDValue UndefVal = DAG.getUNDEF(EltVT);
5383 for (; i < WidenNumElts; ++i)
5385 return DAG.getBuildVector(WidenVT, dl, Ops);
5396SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(
SDNode *
N) {
5397 SDValue InOp = GetWidenedVector(
N->getOperand(0));
5400 N->getOperand(1),
N->getOperand(2));
5413 if (!
LD->getMemoryVT().isByteSized()) {
5417 ReplaceValueWith(
SDValue(LD, 1), NewChain);
5426 EVT LdVT =
LD->getMemoryVT();
5437 const auto *MMO =
LD->getMemOperand();
5439 DAG.getLoadVP(WideVT,
DL,
LD->getChain(),
LD->getBasePtr(), Mask, EVL,
5453 Result = GenWidenVectorExtLoads(LdChain, LD, ExtType);
5455 Result = GenWidenVectorLoads(LdChain, LD);
5462 if (LdChain.
size() == 1)
5463 NewChain = LdChain[0];
5469 ReplaceValueWith(
SDValue(
N, 1), NewChain);
5480 SDValue EVL =
N->getVectorLength();
5487 "Unable to widen binary VP op");
5488 Mask = GetWidenedVector(Mask);
5489 assert(
Mask.getValueType().getVectorElementCount() ==
5492 "Unable to widen vector load");
5495 DAG.getLoadVP(
N->getAddressingMode(), ExtType, WidenVT, dl,
N->getChain(),
5496 N->getBasePtr(),
N->getOffset(), Mask, EVL,
5497 N->getMemoryVT(),
N->getMemOperand(),
N->isExpandingLoad());
5511 "Unable to widen VP strided load");
5512 Mask = GetWidenedVector(Mask);
5515 assert(
Mask.getValueType().getVectorElementCount() ==
5517 "Data and mask vectors should have the same number of elements");
5519 SDValue Res = DAG.getStridedLoadVP(
5520 N->getAddressingMode(),
N->getExtensionType(), WidenVT,
DL,
N->getChain(),
5521 N->getBasePtr(),
N->getOffset(),
N->getStride(), Mask,
5522 N->getVectorLength(),
N->getMemoryVT(),
N->getMemOperand(),
5523 N->isExpandingLoad());
5535 EVT MaskVT =
Mask.getValueType();
5536 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5544 Mask = ModifyToType(Mask, WideMaskVT,
true);
5546 SDValue Res = DAG.getMaskedLoad(
5547 WidenVT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
5548 PassThru,
N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
5549 ExtType,
N->isExpandingLoad());
5560 EVT MaskVT =
Mask.getValueType();
5561 SDValue PassThru = GetWidenedVector(
N->getPassThru());
5570 Mask = ModifyToType(Mask, WideMaskVT,
true);
5575 Index.getValueType().getScalarType(),
5583 N->getMemoryVT().getScalarType(), NumElts);
5584 SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
5585 WideMemVT, dl, Ops,
N->getMemOperand(),
5586 N->getIndexType(),
N->getExtensionType());
5603 N->getMemoryVT().getScalarType(), WideEC);
5604 Mask = GetWidenedMask(Mask, WideEC);
5607 Mask,
N->getVectorLength()};
5608 SDValue Res = DAG.getGatherVP(DAG.getVTList(WideVT, MVT::Other), WideMemVT,
5609 dl, Ops,
N->getMemOperand(),
N->getIndexType());
5619 return DAG.getNode(
N->getOpcode(),
SDLoc(
N), WidenVT,
N->getOperand(0));
5647 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
5648 return N->getOperand(OpNo).getValueType();
5656 N =
N.getOperand(0);
5658 for (
unsigned i = 1; i <
N->getNumOperands(); ++i)
5659 if (!
N->getOperand(i)->isUndef())
5661 N =
N.getOperand(0);
5665 N =
N.getOperand(0);
5667 N =
N.getOperand(0);
5694 { MaskVT, MVT::Other }, Ops);
5695 ReplaceValueWith(InMask.
getValue(1),
Mask.getValue(1));
5705 if (MaskScalarBits < ToMaskScalBits) {
5709 }
else if (MaskScalarBits > ToMaskScalBits) {
5715 assert(
Mask->getValueType(0).getScalarSizeInBits() ==
5717 "Mask should have the right element size by now.");
5720 unsigned CurrMaskNumEls =
Mask->getValueType(0).getVectorNumElements();
5722 SDValue ZeroIdx = DAG.getVectorIdxConstant(0,
SDLoc(Mask));
5727 EVT SubVT =
Mask->getValueType(0);
5733 assert((
Mask->getValueType(0) == ToMaskVT) &&
5734 "A mask of ToMaskVT should have been produced by now.");
5755 EVT CondVT =
Cond->getValueType(0);
5759 EVT VSelVT =
N->getValueType(0);
5771 EVT FinalVT = VSelVT;
5783 EVT SetCCResVT = getSetCCResultType(SetCCOpVT);
5801 EVT ToMaskVT = VSelVT;
5808 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5824 if (ScalarBits0 != ScalarBits1) {
5825 EVT NarrowVT = ((ScalarBits0 < ScalarBits1) ? VT0 : VT1);
5826 EVT WideVT = ((NarrowVT == VT0) ? VT1 : VT0);
5838 SETCC0 = convertMask(SETCC0, VT0, MaskVT);
5839 SETCC1 = convertMask(SETCC1, VT1, MaskVT);
5843 Mask = convertMask(
Cond, MaskVT, ToMaskVT);
5856 unsigned Opcode =
N->getOpcode();
5858 if (
SDValue WideCond = WidenVSELECTMask(
N)) {
5859 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5860 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5862 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, WideCond, InOp1, InOp2);
5868 Cond1 = GetWidenedVector(Cond1);
5876 SDValue SplitSelect = SplitVecOp_VSELECT(
N, 0);
5877 SDValue Res = ModifyToType(SplitSelect, WidenVT);
5882 Cond1 = ModifyToType(Cond1, CondWidenVT);
5885 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
5886 SDValue InOp2 = GetWidenedVector(
N->getOperand(2));
5888 if (Opcode == ISD::VP_SELECT || Opcode == ISD::VP_MERGE)
5889 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2,
5891 return DAG.getNode(Opcode,
SDLoc(
N), WidenVT, Cond1, InOp1, InOp2);
5895 SDValue InOp1 = GetWidenedVector(
N->getOperand(2));
5896 SDValue InOp2 = GetWidenedVector(
N->getOperand(3));
5899 N->getOperand(1), InOp1, InOp2,
N->getOperand(4));
5904 return DAG.getUNDEF(WidenVT);
5908 EVT VT =
N->getValueType(0);
5915 SDValue InOp1 = GetWidenedVector(
N->getOperand(0));
5916 SDValue InOp2 = GetWidenedVector(
N->getOperand(1));
5920 for (
unsigned i = 0; i != NumElts; ++i) {
5921 int Idx =
N->getMaskElt(i);
5922 if (
Idx < (
int)NumElts)
5927 for (
unsigned i = NumElts; i != WidenNumElts; ++i)
5929 return DAG.getVectorShuffle(WidenVT, dl, InOp1, InOp2, NewMask);
5933 EVT VT =
N->getValueType(0);
5938 SDValue OpValue = GetWidenedVector(
N->getOperand(0));
5944 unsigned IdxVal = WidenNumElts - VTNumElts;
5957 unsigned GCD = std::gcd(VTNumElts, WidenNumElts);
5960 assert((IdxVal % GCD) == 0 &&
"Expected Idx to be a multiple of the broken "
5961 "down type's element count");
5964 for (; i < VTNumElts / GCD; ++i)
5967 DAG.getVectorIdxConstant(IdxVal + i * GCD, dl)));
5968 for (; i < WidenNumElts / GCD; ++i)
5977 for (
unsigned i = 0; i != VTNumElts; ++i) {
5978 Mask.push_back(IdxVal + i);
5980 for (
unsigned i = VTNumElts; i != WidenNumElts; ++i)
5983 return DAG.getVectorShuffle(WidenVT, dl, ReverseVal, DAG.getUNDEF(WidenVT),
5988 assert(
N->getValueType(0).isVector() &&
5989 N->getOperand(0).getValueType().isVector() &&
5990 "Operands must be vectors");
6004 SDValue SplitVSetCC = SplitVecOp_VSETCC(
N);
6005 SDValue Res = ModifyToType(SplitVSetCC, WidenVT);
6012 InOp1 = GetWidenedVector(InOp1);
6013 InOp2 = GetWidenedVector(InOp2);
6015 InOp1 = DAG.WidenVector(InOp1,
SDLoc(
N));
6016 InOp2 = DAG.WidenVector(InOp2,
SDLoc(
N));
6023 "Input not widened to expected type!");
6025 if (
N->getOpcode() == ISD::VP_SETCC) {
6028 return DAG.getNode(ISD::VP_SETCC,
SDLoc(
N), WidenVT, InOp1, InOp2,
6029 N->getOperand(2), Mask,
N->getOperand(4));
6036 assert(
N->getValueType(0).isVector() &&
6037 N->getOperand(1).getValueType().isVector() &&
6038 "Operands must be vectors");
6039 EVT VT =
N->getValueType(0);
6050 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6055 for (
unsigned i = 0; i != NumElts; ++i) {
6057 DAG.getVectorIdxConstant(i, dl));
6059 DAG.getVectorIdxConstant(i, dl));
6061 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6062 {Chain, LHSElem, RHSElem, CC});
6063 Chains[i] = Scalars[i].getValue(1);
6064 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6065 DAG.getBoolConstant(
true, dl, EltVT, VT),
6066 DAG.getBoolConstant(
false, dl, EltVT, VT));
6070 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6072 return DAG.getBuildVector(WidenVT, dl, Scalars);
6078bool DAGTypeLegalizer::WidenVectorOperand(
SDNode *
N,
unsigned OpNo) {
6079 LLVM_DEBUG(
dbgs() <<
"Widen node operand " << OpNo <<
": ";
N->dump(&DAG));
6083 if (CustomLowerNode(
N,
N->getOperand(OpNo).getValueType(),
false))
6086 switch (
N->getOpcode()) {
6089 dbgs() <<
"WidenVectorOperand op #" << OpNo <<
": ";
6100 case ISD::STORE: Res = WidenVecOp_STORE(
N);
break;
6101 case ISD::VP_STORE: Res = WidenVecOp_VP_STORE(
N, OpNo);
break;
6102 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6103 Res = WidenVecOp_VP_STRIDED_STORE(
N, OpNo);
6108 Res = WidenVecOp_EXTEND_VECTOR_INREG(
N);
6110 case ISD::MSTORE: Res = WidenVecOp_MSTORE(
N, OpNo);
break;
6111 case ISD::MGATHER: Res = WidenVecOp_MGATHER(
N, OpNo);
break;
6113 case ISD::VP_SCATTER: Res = WidenVecOp_VP_SCATTER(
N, OpNo);
break;
6114 case ISD::SETCC: Res = WidenVecOp_SETCC(
N);
break;
6122 Res = WidenVecOp_UnrollVectorOp(
N);
6129 Res = WidenVecOp_EXTEND(
N);
6145 Res = WidenVecOp_Convert(
N);
6150 Res = WidenVecOp_FP_TO_XINT_SAT(
N);
6168 Res = WidenVecOp_VECREDUCE(
N);
6172 Res = WidenVecOp_VECREDUCE_SEQ(
N);
6174 case ISD::VP_REDUCE_FADD:
6175 case ISD::VP_REDUCE_SEQ_FADD:
6176 case ISD::VP_REDUCE_FMUL:
6177 case ISD::VP_REDUCE_SEQ_FMUL:
6178 case ISD::VP_REDUCE_ADD:
6179 case ISD::VP_REDUCE_MUL:
6180 case ISD::VP_REDUCE_AND:
6181 case ISD::VP_REDUCE_OR:
6182 case ISD::VP_REDUCE_XOR:
6183 case ISD::VP_REDUCE_SMAX:
6184 case ISD::VP_REDUCE_SMIN:
6185 case ISD::VP_REDUCE_UMAX:
6186 case ISD::VP_REDUCE_UMIN:
6187 case ISD::VP_REDUCE_FMAX:
6188 case ISD::VP_REDUCE_FMIN:
6189 Res = WidenVecOp_VP_REDUCE(
N);
6191 case ISD::VP_CTTZ_ELTS:
6192 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
6193 Res = WidenVecOp_VP_CttzElements(
N);
6198 if (!Res.
getNode())
return false;
6206 if (
N->isStrictFPOpcode())
6208 "Invalid operand expansion");
6211 "Invalid operand expansion");
6213 ReplaceValueWith(
SDValue(
N, 0), Res);
6219 EVT VT =
N->getValueType(0);
6224 "Unexpected type action");
6225 InOp = GetWidenedVector(InOp);
6228 "Input wasn't widened!");
6239 FixedEltVT == InEltVT) {
6241 "Not enough elements in the fixed type for the operand!");
6243 "We can't have the same type as we started with!");
6246 DAG.getUNDEF(FixedVT), InOp,
6247 DAG.getVectorIdxConstant(0,
DL));
6250 DAG.getVectorIdxConstant(0,
DL));
6259 return WidenVecOp_Convert(
N);
6264 switch (
N->getOpcode()) {
6280 return DAG.UnrollVectorOp(
N);
6285 EVT ResultVT =
N->getValueType(0);
6287 SDValue WideArg = GetWidenedVector(
N->getOperand(0));
6296 {WideArg,
Test},
N->getFlags());
6303 DAG.getVectorIdxConstant(0,
DL));
6305 EVT OpVT =
N->getOperand(0).getValueType();
6308 return DAG.getNode(ExtendCode,
DL, ResultVT,
CC);
6313 EVT VT =
N->getValueType(0);
6316 SDValue InOp =
N->getOperand(
N->isStrictFPOpcode() ? 1 : 0);
6319 "Unexpected type action");
6320 InOp = GetWidenedVector(InOp);
6322 unsigned Opcode =
N->getOpcode();
6328 if (TLI.
isTypeLegal(WideVT) && !
N->isStrictFPOpcode()) {
6330 if (
N->isStrictFPOpcode()) {
6332 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6333 {
N->getOperand(0), InOp,
N->getOperand(2) });
6335 Res = DAG.
getNode(Opcode, dl, { WideVT, MVT::Other },
6336 {
N->getOperand(0), InOp });
6342 Res = DAG.
getNode(Opcode, dl, WideVT, InOp,
N->getOperand(1));
6344 Res = DAG.
getNode(Opcode, dl, WideVT, InOp);
6347 DAG.getVectorIdxConstant(0, dl));
6355 if (
N->isStrictFPOpcode()) {
6358 for (
unsigned i=0; i < NumElts; ++i) {
6360 DAG.getVectorIdxConstant(i, dl));
6361 Ops[i] = DAG.getNode(Opcode, dl, { EltVT, MVT::Other }, NewOps);
6365 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6367 for (
unsigned i = 0; i < NumElts; ++i)
6368 Ops[i] = DAG.getNode(Opcode, dl, EltVT,
6370 InOp, DAG.getVectorIdxConstant(i, dl)));
6373 return DAG.getBuildVector(VT, dl, Ops);
6377 EVT DstVT =
N->getValueType(0);
6378 SDValue Src = GetWidenedVector(
N->getOperand(0));
6379 EVT SrcVT = Src.getValueType();
6388 DAG.
getNode(
N->getOpcode(), dl, WideDstVT, Src,
N->getOperand(1));
6391 DAG.getConstant(0, dl, TLI.
getVectorIdxTy(DAG.getDataLayout())));
6395 return DAG.UnrollVectorOp(
N);
6399 EVT VT =
N->getValueType(0);
6400 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6408 if (!VT.
isVector() && VT != MVT::x86mmx &&
6415 DAG.getVectorIdxConstant(0, dl));
6429 .divideCoefficientBy(EltSize);
6434 DAG.getVectorIdxConstant(0, dl));
6439 return CreateStackStoreLoad(InOp, VT);
6443 EVT VT =
N->getValueType(0);
6445 EVT InVT =
N->getOperand(0).getValueType();
6450 unsigned NumOperands =
N->getNumOperands();
6453 for (i = 1; i < NumOperands; ++i)
6454 if (!
N->getOperand(i).isUndef())
6457 if (i == NumOperands)
6458 return GetWidenedVector(
N->getOperand(0));
6468 for (
unsigned i=0; i < NumOperands; ++i) {
6472 "Unexpected type action");
6473 InOp = GetWidenedVector(InOp);
6474 for (
unsigned j = 0;
j < NumInElts; ++
j)
6476 DAG.getVectorIdxConstant(j, dl));
6478 return DAG.getBuildVector(VT, dl, Ops);
6481SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(
SDNode *
N) {
6482 EVT VT =
N->getValueType(0);
6487 SubVec = GetWidenedVector(SubVec);
6493 bool IndicesValid =
false;
6496 IndicesValid =
true;
6500 Attribute Attr = DAG.getMachineFunction().getFunction().getFnAttribute(
6501 Attribute::VScaleRange);
6506 IndicesValid =
true;
6512 if (IndicesValid && InVec.
isUndef() &&
N->getConstantOperandVal(2) == 0)
6517 "INSERT_SUBVECTOR");
6520SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_SUBVECTOR(
SDNode *
N) {
6521 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6523 N->getValueType(0), InOp,
N->getOperand(1));
6526SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(
SDNode *
N) {
6527 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6529 N->getValueType(0), InOp,
N->getOperand(1));
6532SDValue DAGTypeLegalizer::WidenVecOp_EXTEND_VECTOR_INREG(
SDNode *
N) {
6533 SDValue InOp = GetWidenedVector(
N->getOperand(0));
6534 return DAG.getNode(
N->getOpcode(),
SDLoc(
N),
N->getValueType(0), InOp);
6542 if (!
ST->getMemoryVT().getScalarType().isByteSized())
6545 if (
ST->isTruncatingStore())
6564 StVal = GetWidenedVector(StVal);
6568 return DAG.getStoreVP(
ST->getChain(),
DL, StVal,
ST->getBasePtr(),
6569 DAG.getUNDEF(
ST->getBasePtr().getValueType()), Mask,
6570 EVL, StVT,
ST->getMemOperand(),
6571 ST->getAddressingMode());
6575 if (GenWidenVectorStores(StChain, ST)) {
6576 if (StChain.
size() == 1)
6585SDValue DAGTypeLegalizer::WidenVecOp_VP_STORE(
SDNode *
N,
unsigned OpNo) {
6586 assert((OpNo == 1 || OpNo == 3) &&
6587 "Can widen only data or mask operand of vp_store");
6595 StVal = GetWidenedVector(StVal);
6601 "Unable to widen VP store");
6602 Mask = GetWidenedVector(Mask);
6604 Mask = GetWidenedVector(Mask);
6610 "Unable to widen VP store");
6611 StVal = GetWidenedVector(StVal);
6614 assert(
Mask.getValueType().getVectorElementCount() ==
6616 "Mask and data vectors should have the same number of elements");
6617 return DAG.getStoreVP(
ST->getChain(), dl, StVal,
ST->getBasePtr(),
6618 ST->getOffset(), Mask,
ST->getVectorLength(),
6619 ST->getMemoryVT(),
ST->getMemOperand(),
6620 ST->getAddressingMode(),
ST->isTruncatingStore(),
6621 ST->isCompressingStore());
6626 assert((OpNo == 1 || OpNo == 4) &&
6627 "Can widen only data or mask operand of vp_strided_store");
6636 "Unable to widen VP strided store");
6640 "Unable to widen VP strided store");
6642 StVal = GetWidenedVector(StVal);
6643 Mask = GetWidenedVector(Mask);
6646 Mask.getValueType().getVectorElementCount() &&
6647 "Data and mask vectors should have the same number of elements");
6649 return DAG.getStridedStoreVP(
6656SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(
SDNode *
N,
unsigned OpNo) {
6657 assert((OpNo == 1 || OpNo == 4) &&
6658 "Can widen only data or mask operand of mstore");
6661 EVT MaskVT =
Mask.getValueType();
6667 StVal = GetWidenedVector(StVal);
6674 Mask = ModifyToType(Mask, WideMaskVT,
true);
6678 Mask = ModifyToType(Mask, WideMaskVT,
true);
6684 StVal = ModifyToType(StVal, WideVT);
6687 assert(
Mask.getValueType().getVectorNumElements() ==
6689 "Mask and data vectors should have the same number of elements");
6696SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(
SDNode *
N,
unsigned OpNo) {
6697 assert(OpNo == 4 &&
"Can widen only the index of mgather");
6698 auto *MG = cast<MaskedGatherSDNode>(
N);
6699 SDValue DataOp = MG->getPassThru();
6701 SDValue Scale = MG->getScale();
6709 SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
6710 MG->getMemOperand(), MG->getIndexType(),
6711 MG->getExtensionType());
6717SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(
SDNode *
N,
unsigned OpNo) {
6726 DataOp = GetWidenedVector(DataOp);
6730 EVT IndexVT =
Index.getValueType();
6736 EVT MaskVT =
Mask.getValueType();
6739 Mask = ModifyToType(Mask, WideMaskVT,
true);
6744 }
else if (OpNo == 4) {
6752 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N),
6757SDValue DAGTypeLegalizer::WidenVecOp_VP_SCATTER(
SDNode *
N,
unsigned OpNo) {
6766 DataOp = GetWidenedVector(DataOp);
6769 Mask = GetWidenedMask(Mask, WideEC);
6772 }
else if (OpNo == 3) {
6781 return DAG.getScatterVP(DAG.getVTList(MVT::Other), WideMemVT,
SDLoc(
N), Ops,
6786 SDValue InOp0 = GetWidenedVector(
N->getOperand(0));
6787 SDValue InOp1 = GetWidenedVector(
N->getOperand(1));
6789 EVT VT =
N->getValueType(0);
6804 SVT, InOp0, InOp1,
N->getOperand(2));
6811 DAG.getVectorIdxConstant(0, dl));
6813 EVT OpVT =
N->getOperand(0).getValueType();
6816 return DAG.getNode(ExtendCode, dl, VT,
CC);
6826 EVT VT =
N->getValueType(0);
6828 EVT TmpEltVT =
LHS.getValueType().getVectorElementType();
6835 for (
unsigned i = 0; i != NumElts; ++i) {
6837 DAG.getVectorIdxConstant(i, dl));
6839 DAG.getVectorIdxConstant(i, dl));
6841 Scalars[i] = DAG.getNode(
N->getOpcode(), dl, {MVT::i1, MVT::Other},
6842 {Chain, LHSElem, RHSElem, CC});
6843 Chains[i] = Scalars[i].getValue(1);
6844 Scalars[i] = DAG.getSelect(dl, EltVT, Scalars[i],
6845 DAG.getBoolConstant(
true, dl, EltVT, VT),
6846 DAG.getBoolConstant(
false, dl, EltVT, VT));
6850 ReplaceValueWith(
SDValue(
N, 1), NewChain);
6852 return DAG.getBuildVector(VT, dl, Scalars);
6857 SDValue Op = GetWidenedVector(
N->getOperand(0));
6858 EVT OrigVT =
N->getOperand(0).getValueType();
6859 EVT WideVT =
Op.getValueType();
6863 unsigned Opc =
N->getOpcode();
6865 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6866 assert(NeutralElem &&
"Neutral element must exist");
6873 unsigned GCD = std::gcd(OrigElts, WideElts);
6876 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6877 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6879 DAG.getVectorIdxConstant(
Idx, dl));
6880 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6883 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6885 DAG.getVectorIdxConstant(
Idx, dl));
6887 return DAG.getNode(Opc, dl,
N->getValueType(0),
Op, Flags);
6897 EVT WideVT =
Op.getValueType();
6901 unsigned Opc =
N->getOpcode();
6903 SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
6910 unsigned GCD = std::gcd(OrigElts, WideElts);
6913 SDValue SplatNeutral = DAG.getSplatVector(SplatVT, dl, NeutralElem);
6914 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx =
Idx + GCD)
6916 DAG.getVectorIdxConstant(
Idx, dl));
6917 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6920 for (
unsigned Idx = OrigElts;
Idx < WideElts;
Idx++)
6922 DAG.getVectorIdxConstant(
Idx, dl));
6924 return DAG.getNode(Opc, dl,
N->getValueType(0), AccOp,
Op, Flags);
6928 assert(
N->isVPOpcode() &&
"Expected VP opcode");
6931 SDValue Op = GetWidenedVector(
N->getOperand(1));
6933 Op.getValueType().getVectorElementCount());
6935 return DAG.getNode(
N->getOpcode(), dl,
N->getValueType(0),
6936 {N->getOperand(0), Op, Mask, N->getOperand(3)},
6944 EVT VT =
N->getValueType(0);
6955 DAG.getVectorIdxConstant(0,
DL));
6965 return DAG.getNode(
N->getOpcode(),
DL,
N->getValueType(0),
6966 {Source, Mask, N->getOperand(2)},
N->getFlags());
6983 unsigned WidenEx = 0) {
6988 unsigned AlignInBits =
Align*8;
6991 EVT RetVT = WidenEltVT;
6992 if (!Scalable && Width == WidenEltWidth)
7006 (WidenWidth % MemVTWidth) == 0 &&
7008 (MemVTWidth <= Width ||
7009 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7010 if (MemVTWidth == WidenWidth)
7029 (WidenWidth % MemVTWidth) == 0 &&
7031 (MemVTWidth <= Width ||
7032 (
Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
7041 return std::nullopt;
7052 unsigned Start,
unsigned End) {
7053 SDLoc dl(LdOps[Start]);
7054 EVT LdTy = LdOps[Start].getValueType();
7062 for (
unsigned i = Start + 1; i !=
End; ++i) {
7063 EVT NewLdTy = LdOps[i].getValueType();
7064 if (NewLdTy != LdTy) {
7085 EVT LdVT =
LD->getMemoryVT();
7099 TypeSize WidthDiff = WidenWidth - LdWidth;
7106 std::optional<EVT> FirstVT =
7107 findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
7114 TypeSize FirstVTWidth = FirstVT->getSizeInBits();
7119 std::optional<EVT> NewVT = FirstVT;
7121 TypeSize NewVTWidth = FirstVTWidth;
7123 RemainingWidth -= NewVTWidth;
7130 NewVTWidth = NewVT->getSizeInBits();
7136 SDValue LdOp = DAG.getLoad(*FirstVT, dl, Chain, BasePtr,
LD->getPointerInfo(),
7137 LD->getOriginalAlign(), MMOFlags, AAInfo);
7141 if (MemVTs.
empty()) {
7143 if (!FirstVT->isVector()) {
7150 if (FirstVT == WidenVT)
7155 unsigned NumConcat =
7158 SDValue UndefVal = DAG.getUNDEF(*FirstVT);
7159 ConcatOps[0] = LdOp;
7160 for (
unsigned i = 1; i != NumConcat; ++i)
7161 ConcatOps[i] = UndefVal;
7173 IncrementPointer(cast<LoadSDNode>(LdOp), *FirstVT, MPI, BasePtr,
7176 for (
EVT MemVT : MemVTs) {
7177 Align NewAlign = ScaledOffset == 0
7178 ?
LD->getOriginalAlign()
7181 DAG.getLoad(MemVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
7185 IncrementPointer(cast<LoadSDNode>(L), MemVT, MPI, BasePtr, &ScaledOffset);
7190 if (!LdOps[0].getValueType().
isVector())
7200 EVT LdTy = LdOps[i].getValueType();
7203 for (--i; i >= 0; --i) {
7204 LdTy = LdOps[i].getValueType();
7211 ConcatOps[--
Idx] = LdOps[i];
7212 for (--i; i >= 0; --i) {
7213 EVT NewLdTy = LdOps[i].getValueType();
7214 if (NewLdTy != LdTy) {
7225 WidenOps[j] = ConcatOps[
Idx+j];
7226 for (;
j != NumOps; ++
j)
7227 WidenOps[j] = DAG.getUNDEF(LdTy);
7234 ConcatOps[--
Idx] = LdOps[i];
7245 SDValue UndefVal = DAG.getUNDEF(LdTy);
7248 for (; i !=
End-
Idx; ++i)
7249 WidenOps[i] = ConcatOps[
Idx+i];
7250 for (; i != NumOps; ++i)
7251 WidenOps[i] = UndefVal;
7263 EVT LdVT =
LD->getMemoryVT();
7276 "not yet supported");
7287 DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr,
LD->getPointerInfo(),
7288 LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
7294 Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
7295 LD->getPointerInfo().getWithOffset(
Offset), LdEltVT,
7296 LD->getOriginalAlign(), MMOFlags, AAInfo);
7301 SDValue UndefVal = DAG.getUNDEF(EltVT);
7302 for (; i != WidenNumElts; ++i)
7305 return DAG.getBuildVector(WidenVT, dl, Ops);
7317 SDValue ValOp = GetWidenedVector(
ST->getValue());
7320 EVT StVT =
ST->getMemoryVT();
7328 "Mismatch between store and value types");
7342 std::optional<EVT> NewVT =
7347 TypeSize NewVTWidth = NewVT->getSizeInBits();
7350 StWidth -= NewVTWidth;
7351 MemVTs.
back().second++;
7355 for (
const auto &Pair : MemVTs) {
7356 EVT NewVT = Pair.first;
7357 unsigned Count = Pair.second;
7363 Align NewAlign = ScaledOffset == 0
7364 ?
ST->getOriginalAlign()
7367 DAG.getVectorIdxConstant(
Idx, dl));
7368 SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
7373 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
7385 DAG.getVectorIdxConstant(
Idx++, dl));
7387 DAG.getStore(Chain, dl, EOp, BasePtr, MPI,
ST->getOriginalAlign(),
7391 IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
7405 bool FillWithZeroes) {
7410 "input and widen element type must match");
7412 "cannot modify scalable vectors in this way");
7424 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, InVT) :
7427 for (
unsigned i = 1; i != NumConcat; ++i)
7435 DAG.getVectorIdxConstant(0, dl));
7438 "Scalable vectors should have been handled already.");
7446 unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
7450 DAG.getVectorIdxConstant(
Idx, dl));
7452 SDValue UndefVal = DAG.getUNDEF(EltVT);
7453 for (;
Idx < WidenNumElts; ++
Idx)
7454 Ops[
Idx] = UndefVal;
7456 SDValue Widened = DAG.getBuildVector(NVT, dl, Ops);
7457 if (!FillWithZeroes)
7461 "We expect to never want to FillWithZeroes for non-integral types.");
7464 MaskOps.append(MinNumElts, DAG.getAllOnesConstant(dl, EltVT));
7465 MaskOps.append(WidenNumElts - MinNumElts, DAG.getConstant(0, dl, EltVT));
7467 return DAG.getNode(
ISD::AND, dl, NVT, Widened,
7468 DAG.getBuildVector(NVT, dl,
MaskOps));
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isUndef(ArrayRef< int > Mask)
static SDValue BuildVectorFromScalar(SelectionDAG &DAG, EVT VecTy, SmallVectorImpl< SDValue > &LdOps, unsigned Start, unsigned End)
static EVT getSETCCOperandType(SDValue N)
static bool isSETCCOp(unsigned Opcode)
static bool isLogicalMaskOp(unsigned Opcode)
static bool isSETCCorConvertedSETCC(SDValue N)
static SDValue CollectOpsToWiden(SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &ConcatOps, unsigned ConcatEnd, EVT VT, EVT MaxVT, EVT WidenVT)
static std::optional< EVT > findMemType(SelectionDAG &DAG, const TargetLowering &TLI, unsigned Width, EVT WidenVT, unsigned Align=0, unsigned WidenEx=0)
mir Rename Register Operands
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
This class represents an Operation in the Expression.
static constexpr ElementCount getScalable(ScalarTy MinVal)
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static auto integer_valuetypes()
static auto vector_valuetypes()
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
This class implements a map that also provides access to all stored values in a deterministic order.
This class is used to represent an MGATHER node.
const SDValue & getIndex() const
const SDValue & getScale() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
This class is used to represent an MLOAD node.
const SDValue & getBasePtr() const
bool isExpandingLoad() const
ISD::LoadExtType getExtensionType() const
const SDValue & getMask() const
const SDValue & getPassThru() const
const SDValue & getOffset() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent an MSCATTER node.
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
This class is used to represent an MSTORE node.
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
const SDValue & getOffset() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
This is an abstract virtual class for memory operations.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
Align getOriginalAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isStrictFPOpcode()
Test if this node is a strict floating point pseudo-op.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
Vector takeVector()
Clear the SetVector and return the underlying vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
@ TypeScalarizeScalableVector
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
@ ZeroOrOneBooleanContent
@ UndefinedBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
This class is used to represent an VP_GATHER node.
const SDValue & getScale() const
ISD::MemIndexType getIndexType() const
How is Index applied to BasePtr when computing addresses.
const SDValue & getVectorLength() const
const SDValue & getIndex() const
const SDValue & getBasePtr() const
const SDValue & getMask() const
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
const SDValue & getValue() const
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
const SDValue & getMask() const
ISD::LoadExtType getExtensionType() const
bool isExpandingLoad() const
const SDValue & getStride() const
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getBasePtr() const
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
const SDValue & getBasePtr() const
const SDValue & getMask() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if this is a truncating store.
const SDValue & getOffset() const
const SDValue & getVectorLength() const
const SDValue & getStride() const
bool isCompressingStore() const
Returns true if the op does a compression to the vector before storing.
LLVM Value Representation.
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of...
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ SIGN_EXTEND
Conversion operators.
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr int PoisonMaskElem
void processShuffleMasks(ArrayRef< int > Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, unsigned NumOfUsedRegs, function_ref< void()> NoInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> SingleInputAction, function_ref< void(ArrayRef< int >, unsigned, unsigned)> ManyInputsAction)
Splits and processes shuffle mask depending on the number of input and output registers.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
bool knownBitsGE(EVT VT) const
Return true if we know at compile time this has more than or the same bits as VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands,...
unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.