#include "llvm/IR/IntrinsicsHexagon.h"
static cl::opt<unsigned> HvxWidenThreshold("hexagon-hvx-widen", cl::Hidden,
    cl::init(16),
    cl::desc("Lower threshold (in bytes) for widening to HVX vectors"));
static const MVT LegalV64[] = { MVT::v64i8,  MVT::v32i16,  MVT::v16i32 };
static const MVT LegalW64[] = { MVT::v128i8, MVT::v64i16,  MVT::v32i32 };
static const MVT LegalV128[] = { MVT::v128i8, MVT::v64i16,  MVT::v32i32 };
static const MVT LegalW128[] = { MVT::v256i8, MVT::v128i16, MVT::v64i32 };
// getIEEEProperties(MVT Ty): (exponent width, exponent bias, fraction width)
  return std::make_tuple(5, 15, 10);    // f16
  return std::make_tuple(8, 127, 23);   // f32
  return std::make_tuple(11, 1023, 52); // f64
HexagonTargetLowering::initializeHVXLowering() {
  MVT ByteV = Use64b ? MVT::v64i8  : MVT::v128i8;
  MVT WordV = Use64b ? MVT::v16i32 : MVT::v32i32;
  MVT ByteW = Use64b ? MVT::v128i8 : MVT::v256i8;

  auto setPromoteTo = [this] (unsigned Opc, MVT FromTy, MVT ToTy) {

  static const MVT FloatV[] = { MVT::v64f16,  MVT::v32f32 };
  static const MVT FloatW[] = { MVT::v128f16, MVT::v64f32 };

  for (MVT T : FloatV) {
  for (MVT P : FloatW) {

  for (MVT T : LegalV) {
    if (T.getScalarType() != MVT::i32) {
    if (T.getScalarType() != MVT::i32) {
  for (MVT T : LegalW) {
    if (T.getScalarType() != MVT::i32) {
  for (MVT T : LegalW) {
  for (MVT T : LegalV) {

  for (MVT T: {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, MVT::v16i32})
  for (MVT T: {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32})

    if (ElemTy == MVT::i1)
    int ElemWidth = ElemTy.getFixedSizeInBits();
    int MaxElems = (8*HwLen) / ElemWidth;
    for (int N = 2; N < MaxElems; N *= 2) {
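  // The loop above presumably registers legalization actions for short vector
  // types whose element count is a power of 2 below a full HVX vector (N = 2,
  // 4, 8, ... elements of each legal non-i1 element type); the loop body is
  // not shown in this excerpt.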
HexagonTargetLowering::getPreferredHvxVectorAction(MVT VecTy) const {
  if (ElemTy == MVT::i1 && VecLen > HwLen)
  if (ElemTy == MVT::i1) {
  unsigned HwWidth = 8*HwLen;
  if (VecWidth > 2*HwWidth)
  if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
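// Based on the checks above: i1 (predicate) vectors longer than HwLen and
// data vectors wider than two HVX registers are split, while a vector that is
// at least half of one HVX register but smaller than a full one is widened to
// a full register; the LegalizeTypeAction values actually returned are elided
// in this excerpt.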
HexagonTargetLowering::getCustomHvxOperationAction(SDNode &Op) const {
  unsigned Opc = Op.getOpcode();
HexagonTargetLowering::typeJoin(const TypePair &Tys) const {
  assert(Tys.first.getVectorElementType() == Tys.second.getVectorElementType());
  return MVT::getVectorVT(Tys.first.getVectorElementType(),
                          Tys.first.getVectorNumElements() +
                          Tys.second.getVectorNumElements());
HexagonTargetLowering::TypePair
HexagonTargetLowering::typeSplit(MVT VecTy) const {
  assert((NumElem % 2) == 0 && "Expecting even-sized vector type");
  return { HalfTy, HalfTy };

HexagonTargetLowering::typeExtElem(MVT VecTy, unsigned Factor) const {

HexagonTargetLowering::typeTruncElem(MVT VecTy, unsigned Factor) const {

HexagonTargetLowering::opCastElem(SDValue Vec, MVT ElemTy,

HexagonTargetLowering::opJoin(const VectorPair &Ops, const SDLoc &dl,
                     Ops.first, Ops.second);

HexagonTargetLowering::VectorPair
HexagonTargetLowering::opSplit(SDValue Vec, const SDLoc &dl,
  TypePair Tys = typeSplit(ty(Vec));
  return DAG.SplitVector(Vec, dl, Tys.first, Tys.second);

HexagonTargetLowering::isHvxSingleTy(MVT Ty) const {

HexagonTargetLowering::isHvxPairTy(MVT Ty) const {

HexagonTargetLowering::isHvxBoolTy(MVT Ty) const {

bool HexagonTargetLowering::allowsHvxMemoryAccess(

bool HexagonTargetLowering::allowsHvxMisalignedMemoryAccesses(

void HexagonTargetLowering::AdjustHvxInstrPostInstrSelection(
  unsigned Opc = MI.getOpcode();
  auto At = MI.getIterator();

  case Hexagon::PS_vsplatib:
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
          .add(MI.getOperand(1));
      BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatb), OutV)
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
          .addImm(V << 24 | V << 16 | V << 8 | V);
  case Hexagon::PS_vsplatrb:
      BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatb), OutV)
          .add(MI.getOperand(1));
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(MB, At, DL, TII.get(Hexagon::S2_vsplatrb), SplatV)
      BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplatw), OutV)
  case Hexagon::PS_vsplatih:
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
          .add(MI.getOperand(1));
      BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplath), OutV)
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  case Hexagon::PS_vsplatrh:
      BuildMI(MB, At, DL, TII.get(Hexagon::V6_lvsplath), OutV)
          .add(MI.getOperand(1));
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(MB, At, DL, TII.get(Hexagon::A2_combine_ll), SplatV)
  case Hexagon::PS_vsplatiw:
  case Hexagon::PS_vsplatrw:
    if (Opc == Hexagon::PS_vsplatiw) {
      Register SplatV = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
          .add(MI.getOperand(1));
      MI.getOperand(1).ChangeToRegister(SplatV, false);
    MI.setDesc(TII.get(Hexagon::V6_lvsplatw));
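  // Taken together, the PS_vsplat* pseudos above are expanded after
  // instruction selection: the immediate forms first materialize the
  // replicated scalar in a general register (a byte V becomes
  // V<<24 | V<<16 | V<<8 | V), the register forms replicate via S2_vsplatrb
  // or A2_combine_ll, and the result then feeds V6_lvsplatb/h/w to fill the
  // HVX vector register OutV.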
HexagonTargetLowering::convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
  const SDLoc &dl(ElemIdx);

HexagonTargetLowering::getIndexInWord32(SDValue Idx, MVT ElemTy,
  assert(ElemWidth >= 8 && ElemWidth <= 32);
  if (ty(Idx) != MVT::i32)

HexagonTargetLowering::getByteShuffle(const SDLoc &dl, SDValue Op0,
  if (ElemTy == MVT::i8)
  MVT ResTy = tyVector(OpTy, MVT::i8);
  for (unsigned I = 0; I != ElemSize; ++I)
  int NewM = M*ElemSize;
  for (unsigned I = 0; I != ElemSize; ++I)
                       opCastElem(Op1, MVT::i8, DAG), ByteMask);

  unsigned VecLen = Values.size();
  unsigned ElemSize = ElemWidth / 8;
  assert(ElemSize*VecLen == HwLen);
  assert((ElemSize == 1 || ElemSize == 2) && "Invalid element size");
  unsigned OpsPerWord = (ElemSize == 1) ? 4 : 2;
  for (unsigned i = 0; i != VecLen; i += OpsPerWord) {
    SDValue W = buildVector32(Values.slice(i, OpsPerWord), dl, PartVT, DAG);

  unsigned NumValues = Values.size();
  for (unsigned i = 0; i != NumValues; ++i) {
    if (Values[i].isUndef())
    if (!SplatV.getNode())
    else if (SplatV != Values[i])
  unsigned NumWords = Words.size();
  bool IsSplat = isSplat(Words, SplatV);
  if (IsSplat && isUndef(SplatV))
    return getZero(dl, VecTy, DAG);
  bool AllConst = getBuildVectorConstInts(Values, VecTy, DAG, Consts);
  Align Alignment(HwLen);

  auto IsBuildFromExtracts = [this,&Values] (SDValue &SrcVec,
    SrcIdx.push_back(-1);
    int I = C->getSExtValue();
    assert(I >= 0 && "Negative element index");

  if (IsBuildFromExtracts(ExtVec, ExtIdx)) {
    MVT ExtTy = ty(ExtVec);
    if (ExtLen == VecLen || ExtLen == 2*VecLen) {
      for (int M : ExtIdx) {
      for (unsigned I = 0; I != ExtLen; ++I) {
        if (Mask.size() == ExtLen)
      return ExtLen == VecLen ? S : LoHalf(S, DAG);
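  // The IsBuildFromExtracts path above appears to handle a BUILD_VECTOR whose
  // inputs are all extracts from a single source vector of the same or twice
  // the length: the extract indices are collected into a shuffle mask, and the
  // result is a shuffle of that source, taking the low half when the source is
  // a vector pair.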
  for (unsigned i = 0; i != NumWords; ++i) {
    if (Words[i].isUndef())
    for (unsigned j = i; j != NumWords; ++j)
      if (Words[i] == Words[j])
    if (VecHist[i] > VecHist[n])

  SDValue HalfV = getZero(dl, VecTy, DAG);
  if (VecHist[n] > 1) {
               {HalfV, SplatV, DAG.getConstant(HwLen/2, dl, MVT::i32)});
  for (unsigned i = 0; i != NumWords/2; ++i) {
    if (Words[i] != Words[n] || VecHist[n] <= 1) {
    if (Words[i+NumWords/2] != Words[n] || VecHist[n] <= 1) {
                  {HalfV1, Words[i+NumWords/2]});
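  // General case: a histogram over the constituent 32-bit words (VecHist)
  // picks the most frequent word n; if it occurs more than once, the vector is
  // first filled with it as a splat, and only the words that differ from
  // Words[n] are then inserted individually into the two halves, reducing the
  // number of insert operations.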
HexagonTargetLowering::createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
      unsigned BitBytes, bool ZeroFill, SelectionDAG &DAG) const {
  MVT PredTy = ty(PredV);
  for (unsigned i = 0; i != HwLen; ++i) {
    unsigned Num = i % Scale;
    unsigned Off = i / Scale;
  assert(BlockLen < HwLen && "vsetq(v1) prerequisite");
  SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
  assert(PredTy == MVT::v2i1 || PredTy == MVT::v4i1 || PredTy == MVT::v8i1);
  while (Bytes < BitBytes) {
    Words[IdxW].clear();
    for (const SDValue &W : Words[IdxW ^ 1]) {
      SDValue T = expandPredicate(W, dl, DAG);
    for (const SDValue &W : Words[IdxW ^ 1]) {
  assert(Bytes == BitBytes);
  SDValue Vec = ZeroFill ? getZero(dl, ByteTy, DAG) : DAG.getUNDEF(ByteTy);
  for (const SDValue &W : Words[IdxW]) {

  unsigned VecLen = Values.size();
  assert(VecLen <= HwLen || VecLen == 8*HwLen);
  bool AllT = true, AllF = true;
    if (const auto *N = dyn_cast<ConstantSDNode>(V.getNode()))
      return !N->isZero();
    if (const auto *N = dyn_cast<ConstantSDNode>(V.getNode()))
  if (VecLen <= HwLen) {
    assert(HwLen % VecLen == 0);
    unsigned BitBytes = HwLen / VecLen;
    for (unsigned B = 0; B != BitBytes; ++B)
  for (unsigned I = 0; I != VecLen; I += 8) {
    for (; B != 8; ++B) {
      if (!Values[I+B].isUndef())
      assert(Values[I+B].isUndef() || Values[I+B] == F);
  SDValue ByteVec = buildHvxVectorReg(Bytes, dl, ByteTy, DAG);
HexagonTargetLowering::extractHvxElementReg(SDValue VecV, SDValue IdxV,
  assert(ElemWidth >= 8 && ElemWidth <= 32);
  SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG);
  if (ElemTy == MVT::i32)
  SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG);
  return extractVector(ExVec, SubIdx, dl, ElemTy, MVT::i32, DAG);

HexagonTargetLowering::extractHvxElementPred(SDValue VecV, SDValue IdxV,
  assert(ResTy == MVT::i1);
  SDValue ExtB = extractHvxElementReg(ByteVec, IdxV, dl, MVT::i32, DAG);
  return getInstr(Hexagon::C2_cmpgtui, dl, MVT::i1, {ExtB, Zero}, DAG);

HexagonTargetLowering::insertHvxElementReg(SDValue VecV, SDValue IdxV,
  assert(ElemWidth >= 8 && ElemWidth <= 32);
  MVT VecTy = ty(VecV);
  SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG);
  if (ElemTy == MVT::i32)
    return InsertWord(VecV, ValV, ByteIdx);
  SDValue Ext = extractHvxElementReg(opCastElem(VecV, MVT::i32, DAG), WordIdx,
  SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG);
  MVT SubVecTy = tyVector(ty(Ext), ElemTy);
                  ValV, SubIdx, dl, ElemTy, DAG);
  return InsertWord(VecV, Ins, ByteIdx);

HexagonTargetLowering::insertHvxElementPred(SDValue VecV, SDValue IdxV,
  SDValue InsV = insertHvxElementReg(ByteVec, IdxV, ValV, dl, DAG);

HexagonTargetLowering::extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV,
  MVT VecTy = ty(VecV);
  if (isHvxPairTy(VecTy)) {
    if (Idx * ElemWidth >= 8*HwLen)
    if (typeSplit(VecTy).first == ResTy)
  MVT WordTy = tyVector(VecTy, MVT::i32);
  unsigned WordIdx = (Idx*ElemWidth) / 32;
  SDValue W0 = extractHvxElementReg(WordVec, W0Idx, dl, MVT::i32, DAG);
  SDValue W1 = extractHvxElementReg(WordVec, W1Idx, dl, MVT::i32, DAG);
  SDValue WW = getCombine(W1, W0, dl, MVT::i64, DAG);

HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV,
  MVT VecTy = ty(VecV);
  for (unsigned i = 0; i != HwLen/Rep; ++i) {
    for (unsigned j = 0; j != Rep; ++j)
  unsigned Rep = 8 / ResLen;
  for (unsigned r = 0; r != HwLen/ResLen; ++r) {
    for (unsigned i = 0; i != ResLen; ++i) {
      for (unsigned j = 0; j != Rep; ++j)
  SDValue Vec64 = getCombine(W1, W0, dl, MVT::v8i8, DAG);
  return getInstr(Hexagon::A4_vcmpbgtui, dl, ResTy,
HexagonTargetLowering::insertHvxSubvectorReg(SDValue VecV, SDValue SubV,
  MVT VecTy = ty(VecV);
  MVT SubTy = ty(SubV);
  bool IsPair = isHvxPairTy(VecTy);
    V0 = LoHalf(VecV, DAG);
    V1 = HiHalf(VecV, DAG);
  if (isHvxSingleTy(SubTy)) {
    if (const auto *CN = dyn_cast<const ConstantSDNode>(IdxV.getNode())) {
      unsigned Idx = CN->getZExtValue();
      unsigned SubIdx = (Idx == 0) ? Hexagon::vsub_lo : Hexagon::vsub_hi;
  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV.getNode());
  if (!IdxN || !IdxN->isZero()) {
  unsigned RolBase = HwLen;
  if (RolBase != 4 || !IdxN || !IdxN->isZero()) {

HexagonTargetLowering::insertHvxSubvectorPred(SDValue VecV, SDValue SubV,
  MVT VecTy = ty(VecV);
  MVT SubTy = ty(SubV);
  assert(HwLen % VecLen == 0 && "Unexpected vector type");
  unsigned BitBytes = HwLen / VecLen;
  unsigned BlockLen = HwLen / Scale;
  SDValue ByteSub = createHvxPrefixPred(SubV, dl, BitBytes, false, DAG);
  auto *IdxN = dyn_cast<ConstantSDNode>(IdxV.getNode());
  if (!IdxN || !IdxN->isZero()) {
  assert(BlockLen < HwLen && "vsetq(v1) prerequisite");
  SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
  ByteVec = getInstr(Hexagon::V6_vmux, dl, ByteTy, {Q, ByteSub, ByteVec}, DAG);
  if (!IdxN || !IdxN->isZero()) {

HexagonTargetLowering::extendHvxVectorPred(SDValue VecV, const SDLoc &dl,
  SDValue False = getZero(dl, ResTy, DAG);
  return DAG.getSelect(dl, ResTy, VecV, True, False);
HexagonTargetLowering::compressHvxPred(SDValue VecQ, const SDLoc &dl,
  MVT PredTy = ty(VecQ);
  assert(HwLen % PredLen == 0);
  for (unsigned i = 0; i != HwLen/8; ++i) {
    for (unsigned j = 0; j != 8; ++j)
      Tmp.push_back(ConstantInt::get(Int8Ty, 1ull << j));
  Align Alignment(HwLen);
                       getZero(dl, VecTy, DAG));
  SDValue Vrmpy = getInstr(Hexagon::V6_vrmpyub, dl, ByteTy, {Sel, All1}, DAG);
  SDValue Rot = getInstr(Hexagon::V6_valignbi, dl, ByteTy,
  for (unsigned i = 0; i != HwLen; ++i)
    Mask.push_back((8*i) % HwLen + i/(HwLen/8));
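  // compressHvxPred appears to pack a byte-wise predicate into a bit mask:
  // the predicate selects between zero and a constant vector whose bytes cycle
  // through 1, 2, 4, ..., 128 (built in Tmp above), V6_vrmpyub then sums each
  // group of selected bytes so the original lanes become individual bits, and
  // the final byte shuffle (Mask) gathers the packed bytes together.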
  MVT InpTy = ty(VecV);
               getZero(dl, MVT::i32, DAG));
  if (InpWidth < ResWidth) {
    return DAG.getNode(ExtOpc, dl, ResTy, VecV);

HexagonTargetLowering::extractSubvector(SDValue Vec, MVT SubTy, unsigned SubIdx,
  const SDLoc &dl(Vec);

  for (unsigned i = 0; i != Size; ++i)
  MVT SingleTy = typeSplit(VecTy).first;
  SDValue V0 = buildHvxVectorReg(A.take_front(Size/2), dl, SingleTy, DAG);
  SDValue V1 = buildHvxVectorReg(A.drop_front(Size/2), dl, SingleTy, DAG);
    return buildHvxVectorPred(Ops, dl, VecTy, DAG);
  for (unsigned i = 0; i != Size; i++)
                       tyVector(VecTy, MVT::i16), NewOps);
    return DAG.getBitcast(tyVector(VecTy, MVT::f16), T0);
  return buildHvxVectorReg(Ops, dl, VecTy, DAG);

  MVT ArgTy = ty(Op.getOperand(0));
  if (ArgTy == MVT::f16) {

  for (unsigned i = 0, e = Elems.size(); i != e; ++i) {
    MVT NTy = typeLegalize(Ty, DAG);
                     V.getOperand(0), V.getOperand(1)),
    switch (V.getOpcode()) {
        Elems[i] = V.getOperand(0);

  MVT HalfTy = typeSplit(VecTy).first;
                Ops.take_front(NumOp/2));
                Ops.take_back(NumOp/2));
  for (SDValue V : Op.getNode()->op_values()) {
    SDValue P = createHvxPrefixPred(V, dl, BitBytes, true, DAG);
  SDValue Res = getZero(dl, ByteTy, DAG);
  for (unsigned i = 0, e = Prefixes.size(); i != e; ++i) {
  if (ElemTy == MVT::i1)
    return extractHvxElementPred(VecV, IdxV, dl, ty(Op), DAG);
  return extractHvxElementReg(VecV, IdxV, dl, ty(Op), DAG);

  if (ElemTy == MVT::i1)
    return insertHvxElementPred(VecV, IdxV, ValV, dl, DAG);
  if (ElemTy == MVT::f16) {
                     tyVector(VecTy, MVT::i16),
                     DAG.getBitcast(tyVector(VecTy, MVT::i16), VecV),
    return DAG.getBitcast(tyVector(VecTy, MVT::f16), T0);
  return insertHvxElementReg(VecV, IdxV, ValV, dl, DAG);

  MVT SrcTy = ty(SrcV);
  if (ElemTy == MVT::i1)
    return extractHvxSubvectorPred(SrcV, IdxV, dl, DstTy, DAG);
  return extractHvxSubvectorReg(Op, SrcV, IdxV, dl, DstTy, DAG);

  MVT VecTy = ty(VecV);
  if (ElemTy == MVT::i1)
    return insertHvxSubvectorPred(VecV, ValV, IdxV, dl, DAG);
  return insertHvxSubvectorReg(VecV, ValV, IdxV, dl, DAG);

    return LowerHvxSignExt(Op, DAG);
  return extendHvxVectorPred(InpV, SDLoc(Op), ty(Op), false, DAG);
  return extendHvxVectorPred(InpV, SDLoc(Op), ty(Op), true, DAG);
  assert(ResTy == ty(InpV));

  unsigned Opc = Op.getOpcode();
  unsigned Opc = Op.getOpcode();
  if (auto HiVal = Op.getValue(1); HiVal.use_empty()) {
    return emitHvxMulLoHiV62(Vu, SignedVu, Vv, SignedVv, dl, DAG);
  if (auto LoVal = Op.getValue(0); LoVal.use_empty()) {
    SDValue Hi = emitHvxMulHsV60(Vu, Vv, dl, DAG);
  return emitHvxMulLoHiV60(Vu, SignedVu, Vv, SignedVv, dl, DAG);

  MVT ValTy = ty(Val);
  SDValue VQ = compressHvxPred(Val, dl, WordTy, DAG);
  for (unsigned i = 0; i != BitWidth/32; ++i) {
        VQ, DAG.getConstant(i, dl, MVT::i32), dl, MVT::i32, DAG);
  for (unsigned i = 0, e = Words.size(); i < e; i += 2) {
    SDValue C = getCombine(Words[i+1], Words[i], dl, MVT::i64, DAG);

  for (unsigned I = 0; I != HwLen / 8; ++I) {
    for (unsigned J = 0; J != 8; ++J) {
  SDValue I2V = buildHvxVectorReg(Bytes, dl, ConstantVecTy, DAG);

  assert(HwLen % VecLen == 0);
  unsigned ElemSize = HwLen / VecLen;
  if (SDValue S = getVectorShiftByInt(Op, DAG))
HexagonTargetLowering::LowerHvxFunnelShift(SDValue Op,
  unsigned Opc = Op.getOpcode();
  bool UseShifts = ElemTy != MVT::i8;
  if (SDValue SplatV = getSplatValue(S, DAG); SplatV && UseShifts) {
                  {DAG.getConstant(ElemWidth, dl, MVT::i32), ModS});
                  InpTy, dl, DAG.getConstant(ElemWidth - 1, dl, ElemTy));

  unsigned IntNo = Op.getConstantOperandVal(0);
  case Intrinsic::hexagon_V6_pred_typecast:
  case Intrinsic::hexagon_V6_pred_typecast_128B: {
    MVT ResTy = ty(Op), InpTy = ty(Ops[1]);
    if (isHvxBoolTy(ResTy) && isHvxBoolTy(InpTy)) {
  case Intrinsic::hexagon_V6_vmpyss_parts:
  case Intrinsic::hexagon_V6_vmpyss_parts_128B:
  case Intrinsic::hexagon_V6_vmpyuu_parts:
  case Intrinsic::hexagon_V6_vmpyuu_parts_128B:
  case Intrinsic::hexagon_V6_vmpyus_parts:
  case Intrinsic::hexagon_V6_vmpyus_parts_128B: {
  auto *MaskN = cast<MaskedLoadStoreSDNode>(Op.getNode());
  SDValue Chain = MaskN->getChain();
  unsigned Opc = Op->getOpcode();
  SDValue Thru = cast<MaskedLoadSDNode>(MaskN)->getPassThru();
  unsigned StoreOpc = Hexagon::V6_vS32b_qpred_ai;
  SDValue Value = cast<MaskedStoreSDNode>(MaskN)->getValue();
  if (MaskN->getAlign().value() % HwLen == 0) {
    SDValue Z = getZero(dl, ty(V), DAG);
    SDValue LoV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {V, Z, A}, DAG);
    SDValue HiV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {Z, V, A}, DAG);
    return std::make_pair(LoV, HiV);
  VectorPair Tmp = StoreAlign(MaskV, Base);
  VectorPair ValueU = StoreAlign(Value, Base);
      getInstr(StoreOpc, dl, MVT::Other,
               {MaskU.first, Base, Offset0, ValueU.first, Chain}, DAG);
      getInstr(StoreOpc, dl, MVT::Other,
               {MaskU.second, Base, Offset1, ValueU.second, Chain}, DAG);
  DAG.setNodeMemRefs(cast<MachineSDNode>(StoreHi.getNode()), {MemOp});
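  // For a masked store that is not known to be HVX-aligned, StoreAlign above
  // rotates both the mask and the stored value across the misalignment
  // boundary (V6_vlalignb against a zero vector), and two V6_vS32b_qpred_ai
  // predicated stores then cover the two aligned vectors that the access
  // straddles.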
  MVT ArgTy = ty(Op.getOperand(0));
  assert(VecTy == MVT::v64f32 && ArgTy == MVT::v64f16);
      getInstr(Hexagon::V6_vmpy_qf32_hf, dl, VecTy, {F16Vec, Fp16Ones}, DAG);
  MVT HalfTy = typeSplit(VecTy).first;
  VectorPair Pair = opSplit(VmpyVec, dl, DAG);
      getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.first}, DAG);
      getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.second}, DAG);
      getInstr(Hexagon::V6_vshuffvdd, dl, VecTy,
               {HiVec, LoVec, DAG.getConstant(-4, dl, MVT::i32)}, DAG);
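  // The f16 -> f32 widening above multiplies the v64f16 input by a splat of
  // 1.0 with V6_vmpy_qf32_hf, producing a register pair in the qf32 format;
  // each half is converted to IEEE single precision with V6_vconv_sf_qf32, and
  // V6_vshuffvdd interleaves the halves back into element order.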
  if (FpTy == MVT::f16) {
    assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
    if (IntTy == MVT::i8 || IntTy == MVT::i16)
    return EqualizeFpIntConversion(Op, DAG);
  return ExpandHvxFpToInt(Op, DAG);

  if (FpTy == MVT::f16) {
    assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
    if (IntTy == MVT::i8 || IntTy == MVT::i16)
    return EqualizeFpIntConversion(Op, DAG);
  return ExpandHvxIntToFp(Op, DAG);

HexagonTargetLowering::TypePair
HexagonTargetLowering::typeExtendToWider(MVT Ty0, MVT Ty1) const {
  unsigned MaxWidth = std::max(Width0, Width1);
  auto getScalarWithWidth = [](MVT ScalarTy, unsigned Width) {
  MVT WideETy0 = getScalarWithWidth(ElemTy0, MaxWidth);
  MVT WideETy1 = getScalarWithWidth(ElemTy1, MaxWidth);
  return {WideETy0, WideETy1};

HexagonTargetLowering::TypePair
HexagonTargetLowering::typeWidenToWider(MVT Ty0, MVT Ty1) const {
  unsigned MaxLen = std::max(Len0, Len1);

HexagonTargetLowering::typeWidenToHvx(MVT Ty) const {

HexagonTargetLowering::VectorPair

HexagonTargetLowering::VectorPair
HexagonTargetLowering::emitHvxShiftRightRnd(SDValue Val, unsigned Amt,
  const SDLoc &dl(Val);
  MVT ValTy = ty(Val);
  MVT IntTy = tyVector(ValTy, ElemTy);
  auto [Tmp0, Ovf] = emitHvxAddWithOverflow(Inp, LowBits, dl, Signed, DAG);

  SDValue T0 = getInstr(Hexagon::V6_vmpyewuh, dl, VecTy, {B, A}, DAG);
  SDValue T1 = getInstr(Hexagon::V6_vasrw, dl, VecTy, {A, S16}, DAG);
  SDValue P1 = getInstr(Hexagon::V6_vadduhw, dl, PairTy, {T0, T2}, DAG);
  SDValue P2 = getInstr(Hexagon::V6_vaddhw, dl, PairTy, {T0, T2}, DAG);
  SDValue T3 = getInstr(Hexagon::V6_vasrw_acc, dl, VecTy,
                        {HiHalf(P2, DAG), LoHalf(P1, DAG), S16}, DAG);
  SDValue T4 = getInstr(Hexagon::V6_vasrw, dl, VecTy, {B, S16}, DAG);
HexagonTargetLowering::emitHvxMulLoHiV60(SDValue A, bool SignedA, SDValue B,
                                          bool SignedB, const SDLoc &dl,
  if (SignedA && !SignedB) {
  SDValue T0 = getInstr(Hexagon::V6_lvsplatw, dl, VecTy,
                        {DAG.getConstant(0x02020202, dl, MVT::i32)}, DAG);
  SDValue T1 = getInstr(Hexagon::V6_vdelta, dl, VecTy, {B, T0}, DAG);
                        {HiHalf(P1, DAG), LoHalf(P1, DAG)}, DAG);
      getInstr(Hexagon::V6_vlsrw, dl, VecTy, {LoHalf(P0, DAG), S16}, DAG);
  SDValue T4 = getInstr(Hexagon::V6_vasrw_acc, dl, VecTy,
                        {HiHalf(P2, DAG), T3, S16}, DAG);
  Lo = getInstr(Hexagon::V6_vaslw_acc, dl, VecTy,
                {LoHalf(P0, DAG), LoHalf(P2, DAG), S16}, DAG);
    assert(SignedB && "Signed A and unsigned B should have been inverted");
    SDValue X1 = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q1, X0, A}, DAG);
    Hi = getInstr(Hexagon::V6_vsubw, dl, VecTy, {Hi, X1}, DAG);
  } else if (SignedB) {
    Hi = getInstr(Hexagon::V6_vsubwq, dl, VecTy, {Q1, Hi, A}, DAG);
    assert(!SignedA && !SignedB);

HexagonTargetLowering::emitHvxMulLoHiV62(SDValue A, bool SignedA,
  if (SignedA && !SignedB) {
  SDValue P0 = getInstr(Hexagon::V6_vmpyewuh_64, dl, PairTy, {A, B}, DAG);
      getInstr(Hexagon::V6_vmpyowh_64_acc, dl, PairTy, {P0, A, B}, DAG);
    assert(!SignedA && "Signed A and unsigned B should have been inverted");
    SDValue T0 = getInstr(Hexagon::V6_vandvqv, dl, VecTy, {Q0, B}, DAG);
    SDValue T1 = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q1, T0, A}, DAG);
    Hi = getInstr(Hexagon::V6_vaddw, dl, VecTy, {Hi, T1}, DAG);
  } else if (!SignedA) {
    Hi = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q0, Hi, B}, DAG);
  unsigned Opc = Op.getOpcode();
  MVT InpTy = ty(Inp);
  auto [WInpTy, WResTy] = typeExtendToWider(InpTy, ResTy);

  unsigned Opc = Op.getOpcode();
  MVT InpTy = ty(Op0);
  unsigned ElemWidth = 1 + ExpWidth + FracWidth;
  assert((1ull << (ExpWidth - 1)) == (1 + ExpBias));

  unsigned Opc = Op.getOpcode();
  MVT InpTy = ty(Op0);
  unsigned ElemWidth = 1 + ExpWidth + FracWidth;
  auto [Frac, Ovf] = emitHvxShiftRightRnd(Frac0, ExpWidth + 1, false, DAG);

  unsigned Opc = Op.getOpcode();
  return DAG.getNode(TLOpc, dl, ty(Op), Op.getOperand(0),
  unsigned Opc = Op.getConstantOperandVal(2);

HexagonTargetLowering::VectorPair
  auto SplitVTNode = [&DAG, this](const VTSDNode *N) {
    MVT Ty = typeSplit(N->getVT().getSimpleVT()).first;
    return std::make_pair(TV, TV);
  switch (Op.getOpcode()) {
      if (const auto *N = dyn_cast<const VTSDNode>(A.getNode()))
        std::tie(Lo, Hi) = SplitVTNode(N);

  MVT HalfTy = typeSplit(ResTy).first;
  auto *MemN = cast<MemSDNode>(Op.getNode());
  MVT MemTy = MemN->getMemoryVT().getSimpleVT();
  if (!isHvxPairTy(MemTy))
  MVT SingleTy = typeSplit(MemTy).first;
  SDValue Chain = MemN->getChain();
  SDValue Base0 = MemN->getBasePtr();
    assert(cast<LoadSDNode>(Op)->isUnindexed());
    assert(cast<StoreSDNode>(Op)->isUnindexed());
    VectorPair Vals = opSplit(cast<StoreSDNode>(Op)->getValue(), dl, DAG);
    auto MaskN = cast<MaskedLoadStoreSDNode>(Op);
    assert(MaskN->isUnindexed());
    VectorPair Masks = opSplit(MaskN->getMask(), dl, DAG);
        opSplit(cast<MaskedLoadSDNode>(Op)->getPassThru(), dl, DAG);
    VectorPair Vals = opSplit(cast<MaskedStoreSDNode>(Op)->getValue(), dl, DAG);
        Masks.first, SingleTy, MOp0,
        Masks.second, SingleTy, MOp1,

  std::string Name = "Unexpected operation: " + Op->getOperationName(&DAG);
  auto *LoadN = cast<LoadSDNode>(Op.getNode());
  assert(LoadN->isUnindexed() && "Not widening indexed loads yet");
  assert(LoadN->getMemoryVT().getVectorElementType() != MVT::i1 &&
         "Not widening loads of i1 yet");
  SDValue Chain = LoadN->getChain();
  assert(ResLen < HwLen && "vsetq(v1) prerequisite");
  SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,

  auto *StoreN = cast<StoreSDNode>(Op.getNode());
  assert(StoreN->isUnindexed() && "Not widening indexed stores yet");
  assert(StoreN->getMemoryVT().getVectorElementType() != MVT::i1 &&
         "Not widening stores of i1 yet");
  SDValue Chain = StoreN->getChain();
  SDValue Value = opCastElem(StoreN->getValue(), MVT::i8, DAG);
  for (unsigned Len = ValueLen; Len < HwLen; ) {
  assert(ty(Value).getVectorNumElements() == HwLen);
  assert(ValueLen < HwLen && "vsetq(v1) prerequisite");
  SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
  auto *MemOp = MF.getMachineMemOperand(StoreN->getMemOperand(), 0, HwLen);

  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  SDValue WideOp0 = appendUndef(Op0, WideOpTy, DAG);
  SDValue WideOp1 = appendUndef(Op1, WideOpTy, DAG);
                     {WideOp0, WideOp1, Op.getOperand(2)});
                     {SetCC, getZero(dl, MVT::i32, DAG)});
  unsigned Opc = Op.getOpcode();
  bool IsPairOp = isHvxPairTy(ty(Op)) ||
                  llvm::any_of(Op.getNode()->ops(), [this](SDValue V) {
                    return isHvxPairTy(ty(V));
                  });
      return SplitHvxMemOp(Op, DAG);
      return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
      return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
      return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
    case ISD::SRL:                     return LowerHvxShift(Op, DAG);
    case ISD::FSHR:                    return LowerHvxFunnelShift(Op, DAG);
  unsigned Opc = Op.getOpcode();
  MVT InpTy = ty(Inp);
  assert(InpWidth != ResWidth);
  if (InpWidth == 2 * ResWidth || ResWidth == 2 * InpWidth)
  auto repeatOp = [&](unsigned NewWidth, SDValue Arg) {
    return DAG.getNode(Opc, dl, Ty, {Arg, Op.getOperand(1), Op.getOperand(2)});
  if (InpWidth < ResWidth) {
    while (InpWidth * 2 <= ResWidth)
      S = repeatOp(InpWidth *= 2, S);
    while (InpWidth / 2 >= ResWidth)
      S = repeatOp(InpWidth /= 2, S);
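  // A resize whose input and result element widths differ by more than a
  // factor of two is expanded into single steps: repeatOp changes the element
  // width by one factor of two per call, so for example an i8 -> i32 extension
  // would presumably be performed as i8 -> i16 -> i32.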
  MVT InpTy = ty(Inp0);
  unsigned Opc = Op.getOpcode();
  if (shouldWidenToHvx(InpTy, DAG) || shouldWidenToHvx(ResTy, DAG)) {
    auto [WInpTy, WResTy] =
        InpWidth < ResWidth ? typeWidenToWider(typeWidenToHvx(InpTy), ResTy)
                            : typeWidenToWider(InpTy, typeWidenToHvx(ResTy));
    SDValue W = appendUndef(Inp0, WInpTy, DAG);
    SDValue T = ExpandHvxResizeIntoSteps(S, DAG);
    return extractSubvector(T, typeLegalize(ResTy, DAG), 0, DAG);
  } else if (shouldSplitToHvx(InpWidth < ResWidth ? ResTy : InpTy, DAG)) {
    return opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG);
  return RemoveTLWrapper(Op, DAG);

HexagonTargetLowering::LowerHvxOperationWrapper(SDNode *N,
  unsigned Opc = N->getOpcode();
  if (N->getNumOperands() > 0)
    Inp0 = Op.getOperand(0);
    Results.push_back(CreateTLWrapper(Op, DAG));
    if (shouldWidenToHvx(ty(Inp0), DAG)) {
    if (shouldWidenToHvx(ty(cast<StoreSDNode>(N)->getValue()), DAG)) {
    if (isHvxPairTy(ty(Op))) {
    if (isHvxPairTy(ty(Op->getOperand(1)))) {
    if (ty(Op).getSizeInBits() != ty(Inp0).getSizeInBits()) {
      SDValue T = EqualizeFpIntConversion(Op, DAG);
    Results.push_back(LegalizeHvxResize(Op, DAG));

HexagonTargetLowering::ReplaceHvxNodeResults(SDNode *N,
  unsigned Opc = N->getOpcode();
  if (N->getNumOperands() > 0)
    Inp0 = Op.getOperand(0);
    Results.push_back(CreateTLWrapper(Op, DAG));
    if (shouldWidenToHvx(ty(Op), DAG)) {
    if (shouldWidenToHvx(ty(Op), DAG)) {
    if (isHvxBoolTy(ty(Inp0))) {
    if (ty(Op).getSizeInBits() != ty(Inp0).getSizeInBits()) {
      SDValue T = EqualizeFpIntConversion(Op, DAG);
    Results.push_back(LegalizeHvxResize(Op, DAG));
HexagonTargetLowering::combineTruncateBeforeLegal(SDValue Op,
                                                  DAGCombinerInfo &DCI) const {
  EVT TruncTy = Op.getValueType();
  EVT SrcTy = Src.getValueType();
  if (2 * CastLen != SrcLen)
  for (int i = 0; i != static_cast<int>(CastLen); ++i) {
    Mask[i + CastLen] = 2 * i + 1;
  return opSplit(Deal, dl, DAG).first;
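  // This combine rewrites a truncate (of a value reinterpreted as a vector
  // with twice as many narrow elements) into a "deal" shuffle: by construction
  // the mask gathers even source lanes into the low half and odd lanes into
  // the high half, and the low half of the shuffled vector is the truncated
  // result.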
HexagonTargetLowering::combineConcatVectorsBeforeLegal(
    SDValue Op, DAGCombinerInfo &DCI) const {
  if (Order.size() > 2)
  auto AppendToMask = [&](SDValue Shuffle) {
    auto *SV = cast<ShuffleVectorSDNode>(Shuffle.getNode());
    for (int M : Mask) {
      SDValue Src = static_cast<unsigned>(M) < InpLen ? X : Y;
      if (static_cast<unsigned>(M) >= InpLen)
      int OutOffset = Order[0] == Src ? 0 : InpLen;

HexagonTargetLowering::PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  unsigned Opc = Op.getOpcode();
    return combineTruncateBeforeLegal(Op, DCI);
    return combineConcatVectorsBeforeLegal(Op, DCI);
  if (DCI.isBeforeLegalizeOps())
    if (const auto *C = dyn_cast<ConstantSDNode>(Ops[0].getOperand(0)))
      return getZero(dl, ty(Op), DAG);
    if (isUndef(Ops[1]))
    SDValue Vec = Ops[0].getOperand(0);
    SDValue Rot0 = Ops[1], Rot1 = Ops[0].getOperand(1);

HexagonTargetLowering::shouldSplitToHvx(MVT Ty, SelectionDAG &DAG) const {
  auto Action = getPreferredHvxVectorAction(Ty);

HexagonTargetLowering::shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const {
  auto Action = getPreferredHvxVectorAction(Ty);

  auto IsHvxTy = [this](EVT Ty) {
    return Ty.isSimple() && Subtarget.isHVXVectorType(Ty.getSimpleVT(), true);
  return Op.getValueType().isSimple() &&
  auto IsWidenedToHvx = [this, &DAG](SDValue Op) {
    if (!Op.getValueType().isSimple())
    return ValTy.isVector() && shouldWidenToHvx(ValTy, DAG);
  for (int i = 0, e = N->getNumValues(); i != e; ++i) {