#include "llvm/IR/IntrinsicsAArch64.h"
#include <initializer_list>

#define DEBUG_TYPE "aarch64-legalinfo"
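// AArch64LegalizerInfo implements the targeting of the MachineLegalizer for
// AArch64: the constructor below builds the per-opcode legality rule tables,
// and the legalize* methods provide the custom lowerings those rules request.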
  std::initializer_list<LLT> PackedVectorAllTypeList = {/* 128-bit types */
                                                        v16s8, v8s16, v4s32,
                                                        v2s64, v2p0,
                                                        /* 64-bit types */
                                                        v8s8, v4s16, v2s32};
  std::initializer_list<LLT> ScalarAndPtrTypesList = {s8, s16, s32, s64, p0};
  const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();

  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    // ...
  }

  const bool HasFP16 = ST.hasFullFP16();
  const LLT &MinFPScalar = HasFP16 ? f16 : f32;

  const bool HasCSSC = ST.hasCSSC();
  const bool HasRCPC3 = ST.hasRCPC3();
  const bool HasSVE = ST.hasSVE();
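  // These subtarget flags gate whole families of rules below: full FP16
  // arithmetic, the CSSC scalar integer instructions (scalar ABS/CNT/min/max),
  // RCPC3 (128-bit acquire/release load-store pairs) and SVE scalable vectors.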
  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
      .legalFor({p0, s8, s16, s32, s64})
      .legalFor({v2s8, v4s8, v8s8, v16s8, v2s16, v4s16, v8s16, v2s32, v4s32,
                 v2s64, v2p0})
      .widenScalarToNextPow2(0)
      // ...

  // ...
      .legalFor(PackedVectorAllTypeList)
      // ...
      .widenScalarToNextPow2(0)
      // ...
      .maxScalarIf(typeInSet(0, {s64, p0}), 1, s32);

  // ...
      .widenScalarToNextPow2(1)
      // ...
      .maxScalarIf(typeInSet(1, {s64, p0}), 0, s32)
      .maxScalarIf(typeInSet(1, {s128}), 0, s64);
      .legalFor({i32, i64, v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64})
      .legalFor(HasSVE, {nxv16s8, nxv8s16, nxv4s32, nxv2s64})
      .widenScalarToNextPow2(0)
      // ...
            return Query.Types[0].getNumElements() <= 2;
      // ...
            return Query.Types[0].getNumElements() <= 4;
      // ...
            return Query.Types[0].getNumElements() <= 16;
      // ...

  // ...
      .legalFor({i32, i64, v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64})
      .widenScalarToNextPow2(0)
      // ...
            return Query.Types[0].getNumElements() <= 2;
      // ...
            return Query.Types[0].getNumElements() <= 4;
      // ...
            return Query.Types[0].getNumElements() <= 16;
        const auto &SrcTy = Query.Types[0];
        const auto &AmtTy = Query.Types[1];
        return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
               AmtTy.getSizeInBits() == 32;
      // ...
      .widenScalarToNextPow2(0)
      .legalFor({{p0, i64}, {v2p0, v2i64}})
      .clampScalarOrElt(1, s64, s64)
      // ...

  // ...
      .legalFor({i32, i64})
      // ...
      .clampScalar(0, s32, s64)
      // ...
      .lowerFor({i8, i16, i32, i64, v2i32, v4i32, v2i64})
      // ...
      .widenScalarToNextPow2(0, 32)
      // ...
      .legalFor({s64, v16s8, v8s16, v4s32})
      // ...
      .legalFor({v8i8, v16i8, v4i16, v8i16, v2i32, v4i32})
      .legalFor(HasCSSC, {i32, i64})
      .minScalar(HasCSSC, 0, s32)
      // ...
      .legalFor({v16i8, v8i16, v4i32, v2i64, v2p0, v8i8, v4i16, v2i32})
      // ...
        return SrcTy.isScalar() && SrcTy.getSizeInBits() < 128;
          [=](const LegalityQuery &Query) { return std::make_pair(0, v4i16); })
      // ...
          [=](const LegalityQuery &Query) { return std::make_pair(0, v2i32); })
      .clampNumElements(0, v8s8, v16s8)
  getActionDefinitionsBuilder(
      {G_ABDS, G_ABDU, G_UAVGFLOOR, G_UAVGCEIL, G_SAVGFLOOR, G_SAVGCEIL})
      .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
      // ...

  getActionDefinitionsBuilder(
      {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
      .legalFor({{s32, s32}, {s64, s32}})
      .clampScalar(0, s32, s64)
      // ...

  // ...
      .customFor({{i32, i32}, {i32, i64}, {i64, i64}})
      // ...
        return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
      // ...
      .customFor({{s32, s32}, {s64, s64}});
      .legalFor(HasCSSC, {{s32, s32}, {s64, s64}})
      .legalFor({{v8s8, v8s8}, {v16s8, v16s8}})
      .customFor(!HasCSSC, {{s32, s32}, {s64, s64}})
      .customFor({{s128, s128},
                  // ...
      .clampScalar(0, s32, s128)
      // ...

  // ...
      .legalFor({{s32, s32},
                 // ...
      .widenScalarToNextPow2(1, 32)
      // ...
      .customFor(!HasCSSC, {s32, s64});
      .widenScalarToNextPow2(0, 32)
      // ...

  // ...
      .legalFor({s32, s64, v4s16, v8s16, v2s32, v4s32, v2s64})
      // ...

  // ...
      .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64})
      .legalFor(HasSVE, {nxv16s8, nxv8s16, nxv4s32, nxv2s64})
      .clampNumElements(0, v8s8, v16s8)
  getActionDefinitionsBuilder(
      {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT, G_FMAXNUM, G_FMINNUM,
       G_FMAXIMUM, G_FMINIMUM, G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
       G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND, G_INTRINSIC_ROUNDEVEN})
      .legalFor({f32, f64, v2f32, v4f32, v2f64})
      .legalFor(HasFP16, {f16, v4f16, v8f16})
      // ...

  // ...
      .legalFor({f32, f64, v2f32, v4f32, v2f64})
      .legalFor(HasFP16, {f16, v4f16, v8f16})
  // ...
       G_FLOG10, G_FTAN, G_FEXP, G_FEXP2, G_FEXP10,
       G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH,
       G_FSINH, G_FTANH, G_FMODF})
      // ...
      .libcallFor({{f32, i32}, {f64, i32}, {f128, i32}});
      .legalFor({{s32, s32}, {s32, s64}, {s64, s32}, {s64, s64}})
      .legalFor(HasFP16, {{s32, s16}, {s64, s16}})
      // ...

  // ...
      .legalFor({{s64, s32}, {s64, s64}})
      .legalFor(HasFP16, {{s64, s16}})
  for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {
    // ...
    if (Op == G_SEXTLOAD)
      // ...
        .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                   // ...
                                   {v2s32, p0, s64, 8}})
        .widenScalarToNextPow2(0)
        .clampScalar(0, s32, s64)
        // ...
        .unsupportedIfMemSizeNotPow2()
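    // Each legalForTypesWithMemDesc entry is {ResultTy, PtrTy, MemTy,
    // MinAlign}: the in-register type, the pointer type, the in-memory type,
    // and the minimum alignment in bits (8 = byte alignment suffices).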
      .customIf([=](const LegalityQuery &Query) {
        return HasRCPC3 && Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering == AtomicOrdering::Acquire;
      })
      .customIf([=](const LegalityQuery &Query) {
        return Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
      })
      .legalForTypesWithMemDesc({{s8, p0, s8, 8},
                                 // ...
                                 {v16s8, p0, s128, 8},
                                 // ...
                                 {v8s16, p0, s128, 8},
                                 // ...
                                 {v4s32, p0, s128, 8},
                                 {v2s64, p0, s128, 8}})
      // These extending loads are also legal.
      .legalForTypesWithMemDesc(
          {{s32, p0, s8, 8}, {s32, p0, s16, 8}, {s64, p0, s32, 8}})
      .legalForTypesWithMemDesc({
          // SVE vscale x 128-bit base sizes.
          {nxv16s8, p0, nxv16s8, 8},
          {nxv8s16, p0, nxv8s16, 8},
          {nxv4s32, p0, nxv4s32, 8},
          {nxv2s64, p0, nxv2s64, 8},
      })
      .widenScalarToNextPow2(0, 8)
      // ...
        return Query.Types[0].isScalar() &&
               // ...
               Query.Types[0].getSizeInBits() > 32;
      // ...
      .customIf(IsPtrVecPred)
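      // s128 atomic loads and loads of pointer vectors take the custom path,
      // which is handled by legalizeLoadStore() further down in this file.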
      .customIf([=](const LegalityQuery &Query) {
        return HasRCPC3 && Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering == AtomicOrdering::Release;
      })
      .customIf([=](const LegalityQuery &Query) {
        return Query.Types[0] == s128 &&
               Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
      })
      // ...
          {{s8, p0, s8, 8}, {s16, p0, s8, 8},
           // ...
           {s16, p0, s16, 8}, {s32, p0, s16, 8},
           // ...
           {s32, p0, s8, 8}, {s32, p0, s16, 8}, {s32, p0, s32, 8},
           {s64, p0, s64, 8}, {s64, p0, s32, 8},
           {p0, p0, s64, 8}, {s128, p0, s128, 8}, {v16s8, p0, s128, 8},
           {v8s8, p0, s64, 8}, {v4s16, p0, s64, 8}, {v8s16, p0, s128, 8},
           {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}})
      .legalForTypesWithMemDesc({
          // SVE vscale x 128-bit base sizes.
          {nxv16s8, p0, nxv16s8, 8},
          {nxv8s16, p0, nxv8s16, 8},
          {nxv4s32, p0, nxv4s32, 8},
          {nxv2s64, p0, nxv2s64, 8},
      })
      .clampScalar(0, s8, s64)
      // ...
        return Query.Types[0].isScalar() &&
               // ...
      .clampMaxNumElements(0, s8, 16)
      // ...
        return Query.Types[0].getSizeInBits() ==
               Query.MMODescrs[0].MemoryTy.getSizeInBits();
      // ...
      .customIf(IsPtrVecPred)
          // ...
          {p0, v16s8, v16s8, 8},
          {p0, v4s16, v4s16, 8},
          {p0, v8s16, v8s16, 8},
          {p0, v2s32, v2s32, 8},
          {p0, v4s32, v4s32, 8},
          {p0, v2s64, v2s64, 8},
          // ...

  auto IndexedLoadBasicPred = [=](const LegalityQuery &Query) {
    // ...
      return MemTy == s8 || MemTy == s16;
    // ...
      return MemTy == s8 || MemTy == s16 || MemTy == s32;
    // ...
      .widenScalarToNextPow2(0)
      // ...
      .clampScalar(0, MinFPScalar, s128);
      .legalFor({{i32, i32}, {i32, i64}, {i32, p0}})
      // ...
        return Ty.isVector() && !SrcTy.isPointerVector() &&
               Ty.getElementType() != SrcTy.getElementType();
      // ...
        return Query.Types[1].isPointerVector();
      // ...

  // ...
      .legalFor(HasFP16, {{s32, f16}, {v4s16, v4f16}, {v8s16, v8f16}})
      // ...
        return Ty.isVector() && !SrcTy.isPointerVector() &&
               Ty.getElementType() != SrcTy.getElementType();
      // ...
      .clampNumElements(1, v4s16, v8s16)
    unsigned DstSize = Query.Types[0].getSizeInBits();

    // ...
    if (Query.Types[0].isVector())
      return false;

    if (DstSize < 8 || DstSize >= 128 || !isPowerOf2_32(DstSize))
      return false;
    // ...
    unsigned SrcSize = SrcTy.getSizeInBits();
    // ...

      .legalIf(ExtLegalFunc)
      .legalFor({{v8s16, v8s8}, {v4s32, v4s16}, {v2s64, v2s32}})
      .clampScalar(0, s64, s64)
      // ...
            return (Query.Types[0].getScalarSizeInBits() >
                    Query.Types[1].getScalarSizeInBits() * 2) &&
                   Query.Types[0].isVector() &&
                   (Query.Types[1].getScalarSizeInBits() == 8 ||
                    Query.Types[1].getScalarSizeInBits() == 16);
      // ...
      .clampMinNumElements(1, s8, 8)
      .legalFor({{v8s8, v8s16}, {v4s16, v4s32}, {v2s32, v2s64}})
      // ...
        return DstTy.isVector() && SrcTy.getSizeInBits() > 128 &&
               // ...
      .clampMinNumElements(0, s8, 8)
      // ...
      .legalFor({{v8s8, v8s16}, {v4s16, v4s32}, {v2s32, v2s64}})
      .clampNumElements(0, v2s32, v2s32);
      .legalFor(PackedVectorAllTypeList)
      // ...

  // ...
          {{f16, f32}, {f16, f64}, {f32, f64}, {v4f16, v4f32}, {v2f32, v2f64}})
      .libcallFor({{f16, f128}, {f32, f128}, {f64, f128}})
      // ...
               SrcTy.getScalarSizeInBits() == 64 &&
               // ...
      .clampNumElements(1, v4s32, v4s32)
      // ...

  // ...
      .libcallFor({{f128, f64}, {f128, f32}, {f128, f16}})
      // ...
        return SrcTy.isVector() && DstTy.isVector() &&
               SrcTy.getScalarSizeInBits() == 16 &&
               // ...
      .clampNumElements(0, v4s32, v4s32)
      .legalFor({{i32, f32},
                 // ...
      .legalFor(HasFP16,
                {{i32, f16}, {i64, f16}, {v4i16, v4f16}, {v8i16, v8f16}})
      // ...
        return Query.Types[1] == f16 && Query.Types[0].getSizeInBits() > 64;
      // ...
        return Query.Types[0].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() >
                   Query.Types[1].getScalarSizeInBits();
      // ...
        return Query.Types[1].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() <
                   Query.Types[1].getScalarSizeInBits();
      // ...
      .clampNumElements(0, v4s16, v8s16)
      // ...
      .libcallFor(
          {{i32, f128}, {i64, f128}, {i128, f128}, {i128, f32}, {i128, f64}});
      .legalFor({{i32, f32},
                 // ...
      .legalFor(HasFP16, {{i16, f16}, {i32, f16}, {i64, f16}, {v4i16, v4f16},
                          {v8i16, v8f16}})
      // ...
        return Query.Types[1] == f16 && Query.Types[0].getSizeInBits() > 64;
      // ...
        unsigned ITySize = Query.Types[0].getScalarSizeInBits();
        return (ITySize == 16 || ITySize == 32 || ITySize == 64) &&
               ITySize > Query.Types[1].getScalarSizeInBits();
      // ...
        unsigned FTySize = Query.Types[1].getScalarSizeInBits();
        return (FTySize == 16 || FTySize == 32 || FTySize == 64) &&
               Query.Types[0].getScalarSizeInBits() < FTySize;
      .legalFor({{f32, i32},
                 // ...
      .legalFor(HasFP16,
                {{f16, i32}, {f16, i64}, {v4f16, v4i16}, {v8f16, v8i16}})
      // ...
        return Query.Types[1].isVector() &&
               Query.Types[1].getScalarSizeInBits() == 64 &&
               Query.Types[0].getScalarSizeInBits() == 16;
      // ...
        return Query.Types[0].getScalarType() == bf16;
      // ...
      .widenScalarOrEltToNextPow2OrMinSize(0, HasFP16 ? 16 : 32)
      // ...
        return Query.Types[0].getScalarSizeInBits() == 32 &&
               Query.Types[1].getScalarSizeInBits() == 64;
      // ...
        return Query.Types[1].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() <
                   Query.Types[1].getScalarSizeInBits();
      // ...
        return Query.Types[0].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() >
                   Query.Types[1].getScalarSizeInBits();
      // ...
      .clampNumElements(0, v4s16, v8s16)
      .clampScalar(0, s32, s32);

  // ...
      .legalFor({{s32, s32}, {s64, s32}, {p0, s32}})
      .widenScalarToNextPow2(0)
      // ...

  // ...
      .legalFor({{i64, p0}, {v2i64, v2p0}})
      .widenScalarToNextPow2(0, 64)
      // ...
        return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
      // ...

  // ...
      .legalFor({{p0, i64}, {v2p0, v2i64}})
      .clampMaxNumElements(1, s64, 2);
      .legalForCartesianProduct({s32, v2s16, v4s8})
      .legalForCartesianProduct({s64, v8s8, v4s16, v2s32})
      .legalForCartesianProduct({s128, v16s8, v8s16, v4s32, v2s64, v2p0})
      // ...
        return DstTy.isScalar() && SrcTy.isVector() &&
               SrcTy.getScalarSizeInBits() == 1;
      // ...
        return Query.Types[0].isVector() != Query.Types[1].isVector();
      // ...
      .clampScalar(0, s8, s64)
  bool UseOutlineAtomics = ST.outlineAtomics() && !ST.hasLSE();
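  // With -moutline-atomics and no LSE, atomic operations become libcalls into
  // the __aarch64_* helpers, which pick LSE instructions or load/store
  // exclusive sequences at runtime.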
      .legalFor(!UseOutlineAtomics, {{s32, p0}, {s64, p0}})
      .customFor(!UseOutlineAtomics, {{s128, p0}})
      .libcallFor(UseOutlineAtomics,
                  {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}, {s128, p0}})
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder({G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD,
                               G_ATOMICRMW_SUB, G_ATOMICRMW_AND, G_ATOMICRMW_OR,
                               G_ATOMICRMW_XOR})
      .legalFor(!UseOutlineAtomics, {{s32, p0}, {s64, p0}})
      .libcallFor(UseOutlineAtomics,
                  {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder(
      {G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
      // ...
      .clampScalar(0, s32, s64);
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    // ...
          switch (Q.Types[BigTyIdx].getSizeInBits()) {
          // ...
          switch (Q.Types[LitTyIdx].getSizeInBits()) {
      .legalFor(HasSVE, {{s16, nxv16s8, s64},
                         {s16, nxv8s16, s64},
                         {s32, nxv4s32, s64},
                         {s64, nxv2s64, s64}})
      // ...
        const LLT &EltTy = Query.Types[1].getElementType();
        if (Query.Types[1].isScalableVector())
          return false;
        return Query.Types[0] != EltTy;
      // ...
        return VecTy == v8s8 || VecTy == v16s8 || VecTy == v2s16 ||
               VecTy == v4s16 || VecTy == v8s16 || VecTy == v2s32 ||
               VecTy == v4s32 || VecTy == v2s64 || VecTy == v2p0;
      // ...
        return Query.Types[1].isFixedVector() &&
               Query.Types[1].getNumElements() <= 2;
      // ...
        return Query.Types[1].isFixedVector() &&
               Query.Types[1].getNumElements() <= 4;
      // ...
        return Query.Types[1].isFixedVector() &&
               Query.Types[1].getNumElements() <= 8;
      // ...
        return Query.Types[1].isFixedVector() &&
               Query.Types[1].getNumElements() <= 16;
      // ...
      .minScalarOrElt(0, s8)
          typeInSet(0, {v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64, v2p0}))
      .legalFor(HasSVE, {{nxv16s8, s32, s64},
                         {nxv8s16, s32, s64},
                         {nxv4s32, s32, s64},
                         {nxv2s64, s64, s64}})
      // ...
      .clampNumElements(0, v4s32, v4s32)
      // ...
            {v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64}, DstTy);
      // ...
        return Query.Types[0].getNumElements() >
               Query.Types[1].getNumElements();
      // ...
        return Query.Types[0].getNumElements() <
               Query.Types[1].getNumElements();
      // ...
      .widenScalarOrEltToNextPow2OrMinSize(0, 8)
      .legalFor({{v16s8, v8s8}, {v8s16, v4s16}, {v4s32, v2s32}})
      // ...
        return Query.Types[0].isFixedVector() &&
               Query.Types[0].getScalarSizeInBits() < 8;
      // ...
        return Query.Types[0].isFixedVector() &&
               Query.Types[1].isFixedVector() &&
               Query.Types[0].getScalarSizeInBits() >= 8 &&
               // ...
               Query.Types[0].getSizeInBits() <= 128 &&
               Query.Types[1].getSizeInBits() <= 64;
      // ...
                                   SrcTy.getNumElements())));
      // ...

  // ...
      .legalFor({{v8s8, v16s8}, {v4s16, v8s16}, {v2s32, v4s32}})
      // ...

  // ...
      .legalFor(HasSVE, {{nxv4s32, s32}, {nxv2s64, s64}});
      .customForCartesianProduct({p0}, {s8}, {s64})
      // ...
      .legalForCartesianProduct({p0}, {p0}, {s64})
      .legalFor({{f32, v2f32}, {f32, v4f32}, {f64, v2f64}})
      .legalFor(HasFP16, {{f16, v4f16}, {f16, v8f16}})
      .minScalarOrElt(0, MinFPScalar)
      // ...

  // ...
                               G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM})
      .legalFor({{f32, v2f32}, {f32, v4f32}, {f64, v2f64}})
      .legalFor(HasFP16, {{f16, v4f16}, {f16, v8f16}})
      .minScalarOrElt(0, MinFPScalar)
  getActionDefinitionsBuilder(
      {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX})
      .legalFor({{i8, v8i8},
                 // ...
        return Query.Types[1].isVector() &&
               Query.Types[1].getElementType() != s8 &&
               Query.Types[1].getNumElements() & 1;
      // ...
      .clampMaxNumElements(1, s64, 2)
      // ...

  getActionDefinitionsBuilder(
      {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
      // ...
        if (SrcTy.isScalar())
          // ...
        return SrcTy.getSizeInBits() > 64;
      // ...
        return std::make_pair(1, SrcTy.divide(2));
  getActionDefinitionsBuilder({G_GET_FPENV, G_SET_FPENV, G_RESET_FPENV,
                               G_GET_FPMODE, G_SET_FPMODE, G_RESET_FPMODE})
      // ...

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}
bool AArch64LegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  // ...
  switch (MI.getOpcode()) {
  // ...
  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_GLOBAL_VALUE:
    return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX:
    return legalizeBitfieldExtract(MI, MRI, Helper);
  case TargetOpcode::G_FSHL:
  case TargetOpcode::G_FSHR:
    return legalizeFunnelShift(MI, MRI, MIRBuilder, Observer, Helper);
  case TargetOpcode::G_ROTR:
    return legalizeRotate(MI, MRI, Helper);
  case TargetOpcode::G_CTPOP:
    return legalizeCTPOP(MI, MRI, Helper);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return legalizeAtomicCmpxchg128(MI, MRI, Helper);
  case TargetOpcode::G_CTTZ:
    return legalizeCTTZ(MI, Helper);
  case TargetOpcode::G_BZERO:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMSET:
    return legalizeMemOps(MI, Helper);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(MI, MRI, Helper);
  case TargetOpcode::G_DYN_STACKALLOC:
    return legalizeDynStackAlloc(MI, Helper);
  case TargetOpcode::G_PREFETCH:
    return legalizePrefetch(MI, Helper);
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToCNeg(MI);
  case TargetOpcode::G_ICMP:
    return legalizeICMP(MI, MRI, MIRBuilder);
  case TargetOpcode::G_BITCAST:
    return legalizeBitcast(MI, Helper);
  case TargetOpcode::G_CONCAT_VECTORS:
    return legalizeConcatVectors(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FPTRUNC:
    // ...
    return legalizeFptrunc(MI, MIRBuilder, MRI);
  assert(MI.getOpcode() == TargetOpcode::G_BITCAST && "Unexpected opcode");
  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
  // ...
  if (!DstTy.isScalar() || !SrcTy.isVector() ||
      // ...
  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_FSHL ||
         MI.getOpcode() == TargetOpcode::G_FSHR);
  // ...
  Register ShiftNo = MI.getOperand(3).getReg();
  // ...
  LLT OperationTy = MRI.getType(MI.getOperand(0).getReg());
  // ...
  if (!VRegAndVal || VRegAndVal->Value.urem(BitWidth) == 0)
    // ...
  Amount = MI.getOpcode() == TargetOpcode::G_FSHL ? BitWidth - Amount : Amount;
  // ...
  if (ShiftTy.getSizeInBits() == 64 &&
      MI.getOpcode() == TargetOpcode::G_FSHR &&
      // ...
  if (MI.getOpcode() == TargetOpcode::G_FSHR) {
    // ...
    MI.getOperand(3).setReg(Cast64.getReg(0));
    // ...
  } else if (MI.getOpcode() == TargetOpcode::G_FSHL) {
    MIRBuilder.buildInstr(TargetOpcode::G_FSHR, {MI.getOperand(0).getReg()},
                          {MI.getOperand(1).getReg(), MI.getOperand(2).getReg(),
                           // ...
    MI.eraseFromParent();
  }
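  // Only G_FSHR selects directly (AArch64's EXTR is a funnel shift right);
  // a constant-amount G_FSHL is therefore rewritten as G_FSHR with the
  // complementary BitWidth - Amount shift.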
  Register SrcReg1 = MI.getOperand(2).getReg();
  Register SrcReg2 = MI.getOperand(3).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg1);
  // ...
  MIRBuilder.buildNot(DstReg, CmpReg);
  // ...
  MI.eraseFromParent();
  LLT AmtTy = MRI.getType(AmtReg);
  // ...
  MI.getOperand(2).setReg(NewAmt.getReg(0));
bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // ...
  auto &GlobalOp = MI.getOperand(1);
  // ...
  if (GlobalOp.isSymbol())
    // ...
  const auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())
    // ...
  auto &TM = ST->getTargetLowering()->getTargetMachine();
  unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);
  // ...
  auto Offset = GlobalOp.getOffset();
  // ...
  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
  // ...
         "Should not have folded in an offset for a tagged global!");
  // ...
      .addGlobalAddress(GV, 0x100000000,
                        // ...
  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
  // ...
  MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
      .addGlobalAddress(GV, Offset,
                        AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  MI.eraseFromParent();
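  // Small code model: the address is materialized as ADRP (pc-relative 4 KiB
  // page) plus G_ADD_LOW, which adds the :lo12: page offset. For tagged
  // globals the memory tag lives in the upper address bits and is reinstated
  // with an extra MOVK, so no offset may be folded into those relocations.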
bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                             MachineInstr &MI) const {
  // ...
  auto LowerUnaryOp = [&MI, &MIB](unsigned Opcode) {
    MIB.buildInstr(Opcode, {MI.getOperand(0)}, {MI.getOperand(2)});
    MI.eraseFromParent();
    return true;
  };
  auto LowerBinOp = [&MI, &MIB](unsigned Opcode) {
    MIB.buildInstr(Opcode, {MI.getOperand(0)},
                   {MI.getOperand(2), MI.getOperand(3)});
    MI.eraseFromParent();
    return true;
  };
  auto LowerTriOp = [&MI, &MIB](unsigned Opcode) {
    MIB.buildInstr(Opcode, {MI.getOperand(0)},
                   {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4)});
    MI.eraseFromParent();
    return true;
  };
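  // Each helper rewrites the intrinsic in place as an equivalent generic or
  // AArch64-specific opcode, forwarding the intrinsic's value operands.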
  switch (IntrinsicID) {
  case Intrinsic::vacopy: {
    unsigned PtrSize = ST->isTargetILP32() ? 4 : 8;
    unsigned VaListSize =
        (ST->isTargetDarwin() || ST->isTargetWindows())
            ? PtrSize
            : ST->isTargetILP32() ? 20 : 32;
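    // On Darwin and Windows va_list is a single pointer, while the AAPCS64
    // va_list is a 32-byte struct (20 bytes under ILP32), so vacopy is simply
    // a memcpy of VaListSize bytes.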
    // ...
                                     VaListSize, Align(PtrSize)));
    // ...
                                      VaListSize, Align(PtrSize)));
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::get_dynamic_area_offset: {
    // ...
    MI.eraseFromParent();
  case Intrinsic::aarch64_mops_memset_tag: {
    assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    // ...
    auto &Value = MI.getOperand(3);
    // ...
    Value.setReg(ExtValueReg);
  case Intrinsic::aarch64_prefetch: {
    auto &AddrVal = MI.getOperand(1);
    // ...
    int64_t IsWrite = MI.getOperand(2).getImm();
    int64_t Target = MI.getOperand(3).getImm();
    int64_t IsStream = MI.getOperand(4).getImm();
    int64_t IsData = MI.getOperand(5).getImm();
    // ...
    unsigned PrfOp = (IsWrite << 4) |
                     // ...
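    // The immediate mirrors the PRFM prfop encoding: bit 4 selects store
    // (PST) vs. load (PLD) prefetch, bit 3 instruction vs. data, bits 2:1 the
    // target cache level and bit 0 the keep/stream policy.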
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_range_prefetch: {
    auto &AddrVal = MI.getOperand(1);
    // ...
    int64_t IsWrite = MI.getOperand(2).getImm();
    int64_t IsStream = MI.getOperand(3).getImm();
    unsigned PrfOp = (IsStream << 2) | IsWrite;
    // ...
    MIB.buildInstr(AArch64::G_AARCH64_RANGE_PREFETCH)
        // ...
        .addUse(MI.getOperand(4).getReg());
    MI.eraseFromParent();
  case Intrinsic::aarch64_prefetch_ir: {
    auto &AddrVal = MI.getOperand(1);
    // ...
    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_uaddv:
  case Intrinsic::aarch64_neon_saddv:
  case Intrinsic::aarch64_neon_umaxv:
  case Intrinsic::aarch64_neon_smaxv:
  case Intrinsic::aarch64_neon_uminv:
  case Intrinsic::aarch64_neon_sminv: {
    bool IsSigned = IntrinsicID == Intrinsic::aarch64_neon_saddv ||
                    IntrinsicID == Intrinsic::aarch64_neon_smaxv ||
                    IntrinsicID == Intrinsic::aarch64_neon_sminv;
    // ...
    auto OldDst = MI.getOperand(0).getReg();
    auto OldDstTy = MRI.getType(OldDst);
    // ...
    if (OldDstTy == NewDstTy)
      // ...
    MI.getOperand(0).setReg(NewDst);
    // ...
    MIB.buildExtOrTrunc(IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT,
                        // ...
  case Intrinsic::aarch64_neon_uaddlp:
  case Intrinsic::aarch64_neon_saddlp: {
    unsigned Opc = IntrinsicID == Intrinsic::aarch64_neon_uaddlp
                       ? AArch64::G_UADDLP
                       : AArch64::G_SADDLP;
    // ...
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_neon_uaddlv:
  case Intrinsic::aarch64_neon_saddlv: {
    unsigned Opc = IntrinsicID == Intrinsic::aarch64_neon_uaddlv
                       ? AArch64::G_UADDLV
                       : AArch64::G_SADDLV;
    // ...
    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_smax:
    return LowerBinOp(TargetOpcode::G_SMAX);
  case Intrinsic::aarch64_neon_smin:
    return LowerBinOp(TargetOpcode::G_SMIN);
  case Intrinsic::aarch64_neon_umax:
    return LowerBinOp(TargetOpcode::G_UMAX);
  case Intrinsic::aarch64_neon_umin:
    return LowerBinOp(TargetOpcode::G_UMIN);
  case Intrinsic::aarch64_neon_fmax:
    return LowerBinOp(TargetOpcode::G_FMAXIMUM);
  case Intrinsic::aarch64_neon_fmin:
    return LowerBinOp(TargetOpcode::G_FMINIMUM);
  case Intrinsic::aarch64_neon_fmaxnm:
    return LowerBinOp(TargetOpcode::G_FMAXNUM);
  case Intrinsic::aarch64_neon_fminnm:
    return LowerBinOp(TargetOpcode::G_FMINNUM);
  case Intrinsic::aarch64_neon_pmull:
  case Intrinsic::aarch64_neon_pmull64:
    return LowerBinOp(AArch64::G_PMULL);
  case Intrinsic::aarch64_neon_smull:
    return LowerBinOp(AArch64::G_SMULL);
  case Intrinsic::aarch64_neon_umull:
    return LowerBinOp(AArch64::G_UMULL);
  case Intrinsic::aarch64_neon_sabd:
    return LowerBinOp(TargetOpcode::G_ABDS);
  case Intrinsic::aarch64_neon_uabd:
    return LowerBinOp(TargetOpcode::G_ABDU);
  case Intrinsic::aarch64_neon_uhadd:
    return LowerBinOp(TargetOpcode::G_UAVGFLOOR);
  case Intrinsic::aarch64_neon_urhadd:
    return LowerBinOp(TargetOpcode::G_UAVGCEIL);
  case Intrinsic::aarch64_neon_shadd:
    return LowerBinOp(TargetOpcode::G_SAVGFLOOR);
  case Intrinsic::aarch64_neon_srhadd:
    return LowerBinOp(TargetOpcode::G_SAVGCEIL);
  case Intrinsic::aarch64_neon_sqshrn: {
    // ...
                             {MRI.getType(MI.getOperand(2).getReg())},
                             {MI.getOperand(2), MI.getOperand(3).getImm()});
    // ...
    MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_neon_sqshrun: {
    // ...
                             {MRI.getType(MI.getOperand(2).getReg())},
                             {MI.getOperand(2), MI.getOperand(3).getImm()});
    // ...
    MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_U, {MI.getOperand(0)}, {Shr});
    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_sqrshrn: {
    // ...
    auto Shr = MIB.buildInstr(AArch64::G_SRSHR_I,
                              {MRI.getType(MI.getOperand(2).getReg())},
                              {MI.getOperand(2), MI.getOperand(3).getImm()});
    // ...
    MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_neon_sqrshrun: {
    // ...
    auto Shr = MIB.buildInstr(AArch64::G_SRSHR_I,
                              {MRI.getType(MI.getOperand(2).getReg())},
                              {MI.getOperand(2), MI.getOperand(3).getImm()});
    // ...
    MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_U, {MI.getOperand(0)}, {Shr});
    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_uqrshrn: {
    // ...
    auto Shr = MIB.buildInstr(AArch64::G_URSHR_I,
                              {MRI.getType(MI.getOperand(2).getReg())},
                              {MI.getOperand(2), MI.getOperand(3).getImm()});
    // ...
    MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr});
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_neon_uqshrn: {
    // ...
                             {MRI.getType(MI.getOperand(2).getReg())},
                             {MI.getOperand(2), MI.getOperand(3).getImm()});
    // ...
    MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr});
    MI.eraseFromParent();
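  // The *shrn family decomposes into a (possibly rounding) shift right
  // followed by a saturating-truncate generic opcode, so the shared
  // narrowing patterns can select them.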
  case Intrinsic::aarch64_neon_sqshlu: {
    // ...
    MIB.buildInstr(AArch64::G_SQSHLU_I, {MI.getOperand(0)},
                   // ...
        .addImm(ShiftAmount->getSExtValue());
    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_vsli: {
    // ...
    MIB.buildInstr(
        AArch64::G_SLI, {MI.getOperand(0)},
        {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4).getImm()});
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_neon_vsri: {
    // ...
    MIB.buildInstr(
        AArch64::G_SRI, {MI.getOperand(0)},
        {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4).getImm()});
    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_abs: {
    // ...
    MIB.buildInstr(TargetOpcode::G_ABS, {MI.getOperand(0)},
                   {MI.getOperand(2)});
    MI.eraseFromParent();
    // ...
  }
  case Intrinsic::aarch64_neon_sqadd: {
    // ...
      return LowerBinOp(TargetOpcode::G_SADDSAT);
    // ...
  }
  case Intrinsic::aarch64_neon_sqsub: {
    // ...
      return LowerBinOp(TargetOpcode::G_SSUBSAT);
    // ...
  }
  case Intrinsic::aarch64_neon_uqadd: {
    // ...
      return LowerBinOp(TargetOpcode::G_UADDSAT);
    // ...
  }
  case Intrinsic::aarch64_neon_uqsub: {
    // ...
      return LowerBinOp(TargetOpcode::G_USUBSAT);
    // ...
  }
  case Intrinsic::aarch64_neon_udot:
    return LowerTriOp(AArch64::G_UDOT);
  case Intrinsic::aarch64_neon_sdot:
    return LowerTriOp(AArch64::G_SDOT);
  case Intrinsic::aarch64_neon_usdot:
    return LowerTriOp(AArch64::G_USDOT);
  case Intrinsic::aarch64_neon_sqxtn:
    return LowerUnaryOp(TargetOpcode::G_TRUNC_SSAT_S);
  case Intrinsic::aarch64_neon_sqxtun:
    return LowerUnaryOp(TargetOpcode::G_TRUNC_SSAT_U);
  case Intrinsic::aarch64_neon_uqxtn:
    return LowerUnaryOp(TargetOpcode::G_TRUNC_USAT_U);
  case Intrinsic::aarch64_neon_fcvtzu:
    return LowerUnaryOp(TargetOpcode::G_FPTOUI_SAT);
  case Intrinsic::aarch64_neon_fcvtzs:
    return LowerUnaryOp(TargetOpcode::G_FPTOSI_SAT);
  // ...
  case Intrinsic::vector_reverse:
    // ...
bool AArch64LegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  // ...
  MI.getOperand(2).setReg(ExtCst.getReg(0));
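  // A constant shift amount is rebuilt as a 64-bit G_CONSTANT so the
  // imported selection patterns can match the immediate-shift forms.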
bool AArch64LegalizerInfo::legalizeLoadStore(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_STORE ||
         MI.getOpcode() == TargetOpcode::G_LOAD);
  // ...
  const LLT ValTy = MRI.getType(ValReg);
  // ...
  bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
  // ...
      ST->hasLSE2() && ST->hasRCPC3() && (IsLoadAcquire || IsStoreRelease);
  // ...
    Opcode = IsLoad ? AArch64::LDIAPPX : AArch64::STILPX;
  // ...
    assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2");
    // ...
    Opcode = IsLoad ? AArch64::LDPXi : AArch64::STPXi;
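  // 128-bit atomic accesses are split into a 64-bit register pair: with
  // +lse2 and +rcpc3, acquire loads and release stores map onto LDIAPP/STILP;
  // otherwise plain LDP/STP are used, which are single-copy atomic under
  // +lse2.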
  MachineInstrBuilder NewI;
  if (IsLoad) {
    NewI = MIRBuilder.buildInstr(Opcode, {s64, s64}, {});
    // ...
  } else {
    // ...
    NewI = MIRBuilder.buildInstr(
        Opcode, {}, {Split->getOperand(0), Split->getOperand(1)});
  }
  // ...
  NewI.addUse(MI.getOperand(1).getReg());
  // ...
                                   *ST->getRegBankInfo());
  MI.eraseFromParent();
  // ...
  LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");
  // ...
  auto &MMO = **MI.memoperands_begin();
  // ...
  if (MI.getOpcode() == TargetOpcode::G_STORE) {
    // ...
  } else {
    auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
    // ...
  }
  MI.eraseFromParent();
  MachineFunction &MF = MIRBuilder.getMF();
  Align Alignment(MI.getOperand(2).getImm());
  // ...
  Register ListPtr = MI.getOperand(1).getReg();
  // ...
  LLT PtrTy = MRI.getType(ListPtr);
  // ...
  MachineInstrBuilder DstPtr;
  if (Alignment > PtrAlign) {
    // ...
    auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
    // ...
  }
  // ...
      ValTy, std::max(Alignment, PtrAlign)));
  // ...
  MI.eraseFromParent();
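  // An over-aligned va_arg rounds the saved list pointer up to the requested
  // alignment (add Alignment - 1, then mask the low bits) before loading the
  // value and storing back the bumped pointer.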
bool AArch64LegalizerInfo::legalizeBitfieldExtract(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  // ...
}

// ...
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  // ...
         "Expected src and dst to have the same type!");
  // ...
  auto Add = MIRBuilder.buildAdd(i64, CTPOP1, CTPOP2);
  // ...
  MI.eraseFromParent();
  if (!ST->hasNEON() ||
      MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) {
    // ...
  }
  // ...
  assert((Size == 32 || Size == 64 || Size == 128) &&
         "Expected only 32, 64, or 128 bit scalars!");
  // ...
    Val = MIRBuilder.buildZExt(i64, Val).getReg(0);
  // ...
  LLT Dt = Ty == LLT::fixed_vector(2, i64) ? LLT::fixed_vector(4, i32) : Ty;
  auto Zeros = MIRBuilder.buildConstant(Dt, 0);
  auto Ones = MIRBuilder.buildConstant(VTy, 1);
  MachineInstrBuilder Sum;
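  // CNT only counts bits per byte, so the byte-wise popcount is reduced with
  // a dot product against all-ones (UDOT accumulating into zeros); a 2 x i64
  // result needs one extra pairwise widening add (UADDLP) of the 4 x i32
  // dot-product output.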
  if (Ty == LLT::fixed_vector(2, i64)) {
    auto UDOT =
        MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
    Sum = MIRBuilder.buildInstr(AArch64::G_UADDLP, {Ty}, {UDOT});
  }
  // ...
    Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
  // ...
    Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
  // ...
  MI.eraseFromParent();
    Opc = Intrinsic::aarch64_neon_uaddlv;
    HAddTys.push_back(i32);
  // ...
    Opc = Intrinsic::aarch64_neon_uaddlp;
  // ...
    Opc = Intrinsic::aarch64_neon_uaddlp;
  // ...
    Opc = Intrinsic::aarch64_neon_uaddlp;
  // ...
    Opc = Intrinsic::aarch64_neon_uaddlp;
  // ...
    Opc = Intrinsic::aarch64_neon_uaddlp;
  // ...
  for (LLT HTy : HAddTys) {
    // ...
  }
  MI.eraseFromParent();
bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  // ...
  auto Addr = MI.getOperand(1).getReg();
  auto DesiredI = MIRBuilder.buildUnmerge({i64, i64}, MI.getOperand(2));
  auto NewI = MIRBuilder.buildUnmerge({i64, i64}, MI.getOperand(3));
  // ...
  MachineInstrBuilder CAS;
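  // With LSE the 128-bit cmpxchg selects a CASP variant whose suffix encodes
  // the memory ordering; without LSE it falls back to the CMP_SWAP_128*
  // pseudos, which later expand to an LDXP/STXP loop.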
    auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
    // ...
      Opcode = AArch64::CASPAX;
    // ...
      Opcode = AArch64::CASPLX;
    // ...
      Opcode = AArch64::CASPALX;
    // ...
      Opcode = AArch64::CASPX;
    // ...
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
        .addUse(DesiredI->getOperand(0).getReg())
        .addImm(AArch64::sube64)
        .addUse(DesiredI->getOperand(1).getReg())
        .addImm(AArch64::subo64);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})
        // ...
        .addImm(AArch64::subo64);
    CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});
    auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
    // ...
      Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
    // ...
      Opcode = AArch64::CMP_SWAP_128_RELEASE;
    // ...
      Opcode = AArch64::CMP_SWAP_128;
    // ...
      Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
    // ...
    CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch},
                                {Addr, DesiredI->getOperand(0),
                                 DesiredI->getOperand(1), NewI->getOperand(0),
                                 // ...
  // ...
                                   *ST->getRegBankInfo());
  // ...
  MI.eraseFromParent();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
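  // AArch64 has no count-trailing-zeros instruction: CTTZ is emitted as
  // CTLZ(BITREVERSE(x)), i.e. RBIT followed by CLZ.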
  auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1));
  MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse);
  MI.eraseFromParent();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  // ...
  if (MI.getOpcode() == TargetOpcode::G_MEMSET) {
    // ...
    auto &Value = MI.getOperand(1);
    // ...
    Value.setReg(ExtValueReg);
    // ...
  }
bool AArch64LegalizerInfo::legalizeExtractVectorElt(
    // ...

bool AArch64LegalizerInfo::legalizeDynStackAlloc(
    MachineInstr &MI, LegalizerHelper &Helper) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  // ...
  Register AllocSize = MI.getOperand(1).getReg();
  // ...
         "Unexpected type for dynamic alloca");
  // ...
         "Unexpected type for dynamic alloca");
  auto NewMI =
      MIRBuilder.buildInstr(AArch64::PROBED_STACKALLOC_DYN, {}, {SPTmp});
  MRI.setRegClass(NewMI.getReg(0), &AArch64::GPR64commonRegClass);
  MIRBuilder.setInsertPt(*NewMI->getParent(), NewMI);
  // ...
  MI.eraseFromParent();
  auto &AddrVal = MI.getOperand(0);
  // ...
  int64_t IsWrite = MI.getOperand(1).getImm();
  int64_t Locality = MI.getOperand(2).getImm();
  int64_t IsData = MI.getOperand(3).getImm();
  // ...
  bool IsStream = Locality == 0;
  if (Locality != 0) {
    assert(Locality <= 3 && "Prefetch locality out-of-range");
    // ...
    Locality = 3 - Locality;
  }
  unsigned PrfOp = (IsWrite << 4) | (!IsData << 3) | (Locality << 1) | IsStream;
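  // G_PREFETCH locality counts up from 0 (no temporal locality) while the
  // PRFM target field counts cache levels down from L1, hence the
  // 3 - Locality inversion before packing the prfop immediate.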
  MI.eraseFromParent();
bool AArch64LegalizerInfo::legalizeConcatVectors(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    MachineIRBuilder &MIRBuilder) const {
  // ...
  LLT DstTy = MRI.getType(DstReg);
  // ...
  unsigned WideEltSize =
      // ...
  for (unsigned I = 0; I < Concat.getNumSources(); ++I) {
    // ...
  }
  // ...
  MI.eraseFromParent();
  auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
  // ...
         "Expected a power of 2 elements");
  // ...
  LLT v2s16 = DstTy.changeElementCount(ElementCount::getFixed(2));
  LLT v4s16 = DstTy.changeElementCount(ElementCount::getFixed(4));
  // ...
  int StepSize = ElemCount % 4 ? 2 : 4;
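  // G_FPTRUNC_ODD wraps FCVTXN's round-to-odd narrowing of f64 lane pairs,
  // which makes the two-step f64 -> f32 -> f16 truncation safe from double
  // rounding; the narrowed results are then re-merged StepSize lanes at a
  // time.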
  for (unsigned i = 0; i < ElemCount / 2; ++i)
    // ...
  for (auto SrcReg : RegsToUnmergeTo) {
    // ...
    MIRBuilder.buildInstr(AArch64::G_FPTRUNC_ODD, {v2s32}, {SrcReg})
        // ...
  }
  // ...
  for (unsigned LoopIter = 0; LoopIter < ElemCount / StepSize; ++LoopIter) {
    if (StepSize == 4) {
      // ...
              {v4s32}, {TruncOddDstRegs[Index++], TruncOddDstRegs[Index++]})
      // ...
    }
  }
  // ...
  if (RegsToMerge.size() == 1) {
    // ...
    MI.eraseFromParent();
    // ...
  }
  // ...
  MI.eraseFromParent();