71 SVEPredicateAsCounter,
77enum class MatrixKind {
Array, Tile, Row, Col };
79enum RegConstraintEqualityTy {
94 static PrefixInfo CreateFromInst(
const MCInst &Inst,
uint64_t TSFlags) {
97 case AArch64::MOVPRFX_ZZ:
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
109 "No destructive element size set for movprfx");
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
121 "No destructive element size set for movprfx");
132 PrefixInfo() =
default;
133 bool isActive()
const {
return Active; }
135 unsigned getElementSize()
const {
148 unsigned ElementSize;
164 std::string &Suggestion);
166 unsigned matchRegisterNameAlias(
StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(
const MCExpr *&ImmVal);
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
177 bool parseRegisterInRange(
unsigned &Out,
unsigned Base,
unsigned First,
183 bool parseAuthExpr(
const MCExpr *&Res,
SMLoc &EndLoc);
185 bool parseDirectiveArch(
SMLoc L);
186 bool parseDirectiveArchExtension(
SMLoc L);
187 bool parseDirectiveCPU(
SMLoc L);
188 bool parseDirectiveInst(
SMLoc L);
190 bool parseDirectiveTLSDescCall(
SMLoc L);
193 bool parseDirectiveLtorg(
SMLoc L);
196 bool parseDirectiveUnreq(
SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFINegateRAStateWithPC();
199 bool parseDirectiveCFIBKeyFrame();
200 bool parseDirectiveCFIMTETaggedFrame();
202 bool parseDirectiveVariantPCS(
SMLoc L);
204 bool parseDirectiveSEHAllocStack(
SMLoc L);
205 bool parseDirectiveSEHPrologEnd(
SMLoc L);
206 bool parseDirectiveSEHSaveR19R20X(
SMLoc L);
207 bool parseDirectiveSEHSaveFPLR(
SMLoc L);
208 bool parseDirectiveSEHSaveFPLRX(
SMLoc L);
209 bool parseDirectiveSEHSaveReg(
SMLoc L);
210 bool parseDirectiveSEHSaveRegX(
SMLoc L);
211 bool parseDirectiveSEHSaveRegP(
SMLoc L);
212 bool parseDirectiveSEHSaveRegPX(
SMLoc L);
213 bool parseDirectiveSEHSaveLRPair(
SMLoc L);
214 bool parseDirectiveSEHSaveFReg(
SMLoc L);
215 bool parseDirectiveSEHSaveFRegX(
SMLoc L);
216 bool parseDirectiveSEHSaveFRegP(
SMLoc L);
217 bool parseDirectiveSEHSaveFRegPX(
SMLoc L);
218 bool parseDirectiveSEHSetFP(
SMLoc L);
219 bool parseDirectiveSEHAddFP(
SMLoc L);
220 bool parseDirectiveSEHNop(
SMLoc L);
221 bool parseDirectiveSEHSaveNext(
SMLoc L);
222 bool parseDirectiveSEHEpilogStart(
SMLoc L);
223 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
224 bool parseDirectiveSEHTrapFrame(
SMLoc L);
225 bool parseDirectiveSEHMachineFrame(
SMLoc L);
226 bool parseDirectiveSEHContext(
SMLoc L);
227 bool parseDirectiveSEHECContext(
SMLoc L);
228 bool parseDirectiveSEHClearUnwoundToCall(
SMLoc L);
229 bool parseDirectiveSEHPACSignLR(
SMLoc L);
230 bool parseDirectiveSEHSaveAnyReg(
SMLoc L,
bool Paired,
bool Writeback);
232 bool validateInstruction(
MCInst &Inst,
SMLoc &IDLoc,
234 unsigned getNumRegsForRegKind(RegKind K);
238 bool MatchingInlineAsm)
override;
242#define GET_ASSEMBLER_HEADER
243#include "AArch64GenAsmMatcher.inc"
257 template <
bool IsSVEPrefetch = false>
264 template <
bool AddFPZeroAsLiteral>
272 template <
bool ParseShiftExtend,
273 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
276 template <
bool ParseShiftExtend,
bool ParseSuffix>
278 template <RegKind RK>
282 template <RegKind VectorKind>
284 bool ExpectMatch =
false);
294 enum AArch64MatchResultTy {
296#define GET_OPERAND_DIAGNOSTIC_TYPES
297#include "AArch64GenAsmMatcher.inc"
300 bool IsWindowsArm64EC;
331 SMLoc &EndLoc)
override;
334 unsigned Kind)
override;
338 static bool classifySymbolRef(
const MCExpr *Expr,
371 SMLoc StartLoc, EndLoc;
380 struct ShiftExtendOp {
383 bool HasExplicitAmount;
393 RegConstraintEqualityTy EqualityTy;
409 ShiftExtendOp ShiftExtend;
414 unsigned ElementWidth;
418 struct MatrixTileListOp {
419 unsigned RegMask = 0;
422 struct VectorListOp {
426 unsigned NumElements;
427 unsigned ElementWidth;
428 RegKind RegisterKind;
431 struct VectorIndexOp {
439 struct ShiftedImmOp {
441 unsigned ShiftAmount;
502 unsigned PStateField;
508 struct MatrixRegOp MatrixReg;
509 struct MatrixTileListOp MatrixTileList;
510 struct VectorListOp VectorList;
511 struct VectorIndexOp VectorIndex;
513 struct ShiftedImmOp ShiftedImm;
514 struct ImmRangeOp ImmRange;
516 struct FPImmOp FPImm;
518 struct SysRegOp SysReg;
519 struct SysCRImmOp SysCRImm;
521 struct PSBHintOp PSBHint;
522 struct PHintOp PHint;
523 struct BTIHintOp BTIHint;
524 struct ShiftExtendOp ShiftExtend;
537 StartLoc =
o.StartLoc;
547 ShiftedImm =
o.ShiftedImm;
550 ImmRange =
o.ImmRange;
564 case k_MatrixRegister:
565 MatrixReg =
o.MatrixReg;
567 case k_MatrixTileList:
568 MatrixTileList =
o.MatrixTileList;
571 VectorList =
o.VectorList;
574 VectorIndex =
o.VectorIndex;
580 SysCRImm =
o.SysCRImm;
595 ShiftExtend =
o.ShiftExtend;
604 SMLoc getStartLoc()
const override {
return StartLoc; }
606 SMLoc getEndLoc()
const override {
return EndLoc; }
609 assert(Kind == k_Token &&
"Invalid access!");
613 bool isTokenSuffix()
const {
614 assert(Kind == k_Token &&
"Invalid access!");
618 const MCExpr *getImm()
const {
619 assert(Kind == k_Immediate &&
"Invalid access!");
623 const MCExpr *getShiftedImmVal()
const {
624 assert(Kind == k_ShiftedImm &&
"Invalid access!");
625 return ShiftedImm.Val;
628 unsigned getShiftedImmShift()
const {
629 assert(Kind == k_ShiftedImm &&
"Invalid access!");
630 return ShiftedImm.ShiftAmount;
633 unsigned getFirstImmVal()
const {
634 assert(Kind == k_ImmRange &&
"Invalid access!");
635 return ImmRange.First;
638 unsigned getLastImmVal()
const {
639 assert(Kind == k_ImmRange &&
"Invalid access!");
640 return ImmRange.Last;
644 assert(Kind == k_CondCode &&
"Invalid access!");
649 assert (Kind == k_FPImm &&
"Invalid access!");
650 return APFloat(APFloat::IEEEdouble(),
APInt(64, FPImm.Val,
true));
653 bool getFPImmIsExact()
const {
654 assert (Kind == k_FPImm &&
"Invalid access!");
655 return FPImm.IsExact;
658 unsigned getBarrier()
const {
659 assert(Kind == k_Barrier &&
"Invalid access!");
664 assert(Kind == k_Barrier &&
"Invalid access!");
668 bool getBarriernXSModifier()
const {
669 assert(Kind == k_Barrier &&
"Invalid access!");
674 assert(Kind == k_Register &&
"Invalid access!");
678 unsigned getMatrixReg()
const {
679 assert(Kind == k_MatrixRegister &&
"Invalid access!");
680 return MatrixReg.RegNum;
683 unsigned getMatrixElementWidth()
const {
684 assert(Kind == k_MatrixRegister &&
"Invalid access!");
685 return MatrixReg.ElementWidth;
688 MatrixKind getMatrixKind()
const {
689 assert(Kind == k_MatrixRegister &&
"Invalid access!");
690 return MatrixReg.Kind;
693 unsigned getMatrixTileListRegMask()
const {
694 assert(isMatrixTileList() &&
"Invalid access!");
695 return MatrixTileList.RegMask;
698 RegConstraintEqualityTy getRegEqualityTy()
const {
699 assert(Kind == k_Register &&
"Invalid access!");
700 return Reg.EqualityTy;
703 unsigned getVectorListStart()
const {
704 assert(Kind == k_VectorList &&
"Invalid access!");
705 return VectorList.RegNum;
708 unsigned getVectorListCount()
const {
709 assert(Kind == k_VectorList &&
"Invalid access!");
710 return VectorList.Count;
713 unsigned getVectorListStride()
const {
714 assert(Kind == k_VectorList &&
"Invalid access!");
715 return VectorList.Stride;
718 int getVectorIndex()
const {
719 assert(Kind == k_VectorIndex &&
"Invalid access!");
720 return VectorIndex.Val;
724 assert(Kind == k_SysReg &&
"Invalid access!");
725 return StringRef(SysReg.Data, SysReg.Length);
728 unsigned getSysCR()
const {
729 assert(Kind == k_SysCR &&
"Invalid access!");
733 unsigned getPrefetch()
const {
734 assert(Kind == k_Prefetch &&
"Invalid access!");
738 unsigned getPSBHint()
const {
739 assert(Kind == k_PSBHint &&
"Invalid access!");
743 unsigned getPHint()
const {
744 assert(Kind == k_PHint &&
"Invalid access!");
749 assert(Kind == k_PSBHint &&
"Invalid access!");
750 return StringRef(PSBHint.Data, PSBHint.Length);
754 assert(Kind == k_PHint &&
"Invalid access!");
755 return StringRef(PHint.Data, PHint.Length);
758 unsigned getBTIHint()
const {
759 assert(Kind == k_BTIHint &&
"Invalid access!");
764 assert(Kind == k_BTIHint &&
"Invalid access!");
765 return StringRef(BTIHint.Data, BTIHint.Length);
769 assert(Kind == k_SVCR &&
"Invalid access!");
770 return StringRef(SVCR.Data, SVCR.Length);
774 assert(Kind == k_Prefetch &&
"Invalid access!");
779 if (Kind == k_ShiftExtend)
780 return ShiftExtend.Type;
781 if (Kind == k_Register)
782 return Reg.ShiftExtend.Type;
786 unsigned getShiftExtendAmount()
const {
787 if (Kind == k_ShiftExtend)
788 return ShiftExtend.Amount;
789 if (Kind == k_Register)
790 return Reg.ShiftExtend.Amount;
794 bool hasShiftExtendAmount()
const {
795 if (Kind == k_ShiftExtend)
796 return ShiftExtend.HasExplicitAmount;
797 if (Kind == k_Register)
798 return Reg.ShiftExtend.HasExplicitAmount;
802 bool isImm()
const override {
return Kind == k_Immediate; }
803 bool isMem()
const override {
return false; }
805 bool isUImm6()
const {
812 return (Val >= 0 && Val < 64);
815 template <
int W
idth>
bool isSImm()
const {
return isSImmScaled<Width, 1>(); }
818 return isImmScaled<Bits, Scale>(
true);
821 template <
int Bits,
int Scale,
int Offset = 0,
bool IsRange = false>
823 if (IsRange && isImmRange() &&
824 (getLastImmVal() != getFirstImmVal() +
Offset))
825 return DiagnosticPredicateTy::NoMatch;
827 return isImmScaled<Bits, Scale, IsRange>(
false);
830 template <
int Bits,
int Scale,
bool IsRange = false>
832 if ((!
isImm() && !isImmRange()) || (
isImm() && IsRange) ||
833 (isImmRange() && !IsRange))
834 return DiagnosticPredicateTy::NoMatch;
838 Val = getFirstImmVal();
842 return DiagnosticPredicateTy::NoMatch;
846 int64_t MinVal, MaxVal;
848 int64_t Shift =
Bits - 1;
849 MinVal = (int64_t(1) << Shift) * -Scale;
850 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
853 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
856 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
857 return DiagnosticPredicateTy::Match;
859 return DiagnosticPredicateTy::NearMatch;
864 return DiagnosticPredicateTy::NoMatch;
865 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
867 return DiagnosticPredicateTy::NoMatch;
869 if (Val >= 0 && Val < 32)
870 return DiagnosticPredicateTy::Match;
871 return DiagnosticPredicateTy::NearMatch;
876 return DiagnosticPredicateTy::NoMatch;
877 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
879 return DiagnosticPredicateTy::NoMatch;
881 if (Val >= 0 && Val <= 1)
882 return DiagnosticPredicateTy::Match;
883 return DiagnosticPredicateTy::NearMatch;
886 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
890 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
924 template <
int Scale>
bool isUImm12Offset()
const {
930 return isSymbolicUImm12Offset(getImm());
933 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
936 template <
int N,
int M>
937 bool isImmInRange()
const {
944 return (Val >=
N && Val <= M);
949 template <
typename T>
950 bool isLogicalImm()
const {
967 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
969 bool isImmRange()
const {
return Kind == k_ImmRange; }
974 template <
unsigned W
idth>
975 std::optional<std::pair<int64_t, unsigned>> getShiftedVal()
const {
976 if (isShiftedImm() && Width == getShiftedImmShift())
977 if (
auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
978 return std::make_pair(
CE->getValue(), Width);
981 if (
auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
982 int64_t Val =
CE->getValue();
984 return std::make_pair(Val >> Width, Width);
986 return std::make_pair(Val, 0u);
992 bool isAddSubImm()
const {
993 if (!isShiftedImm() && !
isImm())
999 if (isShiftedImm()) {
1000 unsigned Shift = ShiftedImm.ShiftAmount;
1001 Expr = ShiftedImm.Val;
1002 if (Shift != 0 && Shift != 12)
1011 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
1012 DarwinRefKind, Addend)) {
1031 if (
auto ShiftedVal = getShiftedVal<12>())
1032 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1039 bool isAddSubImmNeg()
const {
1040 if (!isShiftedImm() && !
isImm())
1044 if (
auto ShiftedVal = getShiftedVal<12>())
1045 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1055 template <
typename T>
1057 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1058 return DiagnosticPredicateTy::NoMatch;
1060 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1061 std::is_same<int8_t, T>::value;
1062 if (
auto ShiftedImm = getShiftedVal<8>())
1063 if (!(IsByte && ShiftedImm->second) &&
1064 AArch64_AM::isSVECpyImm<T>(
uint64_t(ShiftedImm->first)
1065 << ShiftedImm->second))
1066 return DiagnosticPredicateTy::Match;
1068 return DiagnosticPredicateTy::NearMatch;
1075 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1076 return DiagnosticPredicateTy::NoMatch;
1078 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1079 std::is_same<int8_t, T>::value;
1080 if (
auto ShiftedImm = getShiftedVal<8>())
1081 if (!(IsByte && ShiftedImm->second) &&
1082 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1083 << ShiftedImm->second))
1084 return DiagnosticPredicateTy::Match;
1086 return DiagnosticPredicateTy::NearMatch;
1090 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1091 return DiagnosticPredicateTy::Match;
1092 return DiagnosticPredicateTy::NoMatch;
1095 bool isCondCode()
const {
return Kind == k_CondCode; }
1097 bool isSIMDImmType10()
const {
1107 bool isBranchTarget()
const {
1116 assert(
N > 0 &&
"Branch target immediate cannot be 0 bits!");
1117 return (Val >= -((1<<(
N-1)) << 2) && Val <= (((1<<(
N-1))-1) << 2));
1128 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1129 DarwinRefKind, Addend)) {
1138 bool isMovWSymbolG3()
const {
1142 bool isMovWSymbolG2()
const {
1143 return isMovWSymbol(
1150 bool isMovWSymbolG1()
const {
1151 return isMovWSymbol(
1159 bool isMovWSymbolG0()
const {
1160 return isMovWSymbol(
1168 template<
int RegW
idth,
int Shift>
1169 bool isMOVZMovAlias()
const {
1170 if (!
isImm())
return false;
1183 template<
int RegW
idth,
int Shift>
1184 bool isMOVNMovAlias()
const {
1185 if (!
isImm())
return false;
1188 if (!CE)
return false;
1194 bool isFPImm()
const {
1195 return Kind == k_FPImm &&
1199 bool isBarrier()
const {
1200 return Kind == k_Barrier && !getBarriernXSModifier();
1202 bool isBarriernXS()
const {
1203 return Kind == k_Barrier && getBarriernXSModifier();
1205 bool isSysReg()
const {
return Kind == k_SysReg; }
1207 bool isMRSSystemRegister()
const {
1208 if (!isSysReg())
return false;
1210 return SysReg.MRSReg != -1U;
1213 bool isMSRSystemRegister()
const {
1214 if (!isSysReg())
return false;
1215 return SysReg.MSRReg != -1U;
1218 bool isSystemPStateFieldWithImm0_1()
const {
1219 if (!isSysReg())
return false;
1220 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1223 bool isSystemPStateFieldWithImm0_15()
const {
1226 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1229 bool isSVCR()
const {
1232 return SVCR.PStateField != -1U;
1235 bool isReg()
const override {
1236 return Kind == k_Register;
1239 bool isVectorList()
const {
return Kind == k_VectorList; }
1241 bool isScalarReg()
const {
1242 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar;
1245 bool isNeonVectorReg()
const {
1246 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1249 bool isNeonVectorRegLo()
const {
1250 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1251 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1253 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1257 bool isNeonVectorReg0to7()
const {
1258 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1259 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1263 bool isMatrix()
const {
return Kind == k_MatrixRegister; }
1264 bool isMatrixTileList()
const {
return Kind == k_MatrixTileList; }
1266 template <
unsigned Class>
bool isSVEPredicateAsCounterReg()
const {
1269 case AArch64::PPRRegClassID:
1270 case AArch64::PPR_3bRegClassID:
1271 case AArch64::PPR_p8to15RegClassID:
1272 case AArch64::PNRRegClassID:
1273 case AArch64::PNR_p8to15RegClassID:
1274 case AArch64::PPRorPNRRegClassID:
1275 RK = RegKind::SVEPredicateAsCounter;
1281 return (Kind == k_Register &&
Reg.Kind == RK) &&
1282 AArch64MCRegisterClasses[
Class].contains(
getReg());
1285 template <
unsigned Class>
bool isSVEVectorReg()
const {
1288 case AArch64::ZPRRegClassID:
1289 case AArch64::ZPR_3bRegClassID:
1290 case AArch64::ZPR_4bRegClassID:
1291 case AArch64::ZPRMul2_LoRegClassID:
1292 case AArch64::ZPRMul2_HiRegClassID:
1293 case AArch64::ZPR_KRegClassID:
1294 RK = RegKind::SVEDataVector;
1296 case AArch64::PPRRegClassID:
1297 case AArch64::PPR_3bRegClassID:
1298 case AArch64::PPR_p8to15RegClassID:
1299 case AArch64::PNRRegClassID:
1300 case AArch64::PNR_p8to15RegClassID:
1301 case AArch64::PPRorPNRRegClassID:
1302 RK = RegKind::SVEPredicateVector;
1308 return (Kind == k_Register &&
Reg.Kind == RK) &&
1309 AArch64MCRegisterClasses[
Class].contains(
getReg());
1312 template <
unsigned Class>
bool isFPRasZPR()
const {
1313 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1314 AArch64MCRegisterClasses[
Class].contains(
getReg());
1317 template <
int ElementW
idth,
unsigned Class>
1319 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1320 return DiagnosticPredicateTy::NoMatch;
1322 if (isSVEVectorReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1323 return DiagnosticPredicateTy::Match;
1325 return DiagnosticPredicateTy::NearMatch;
1328 template <
int ElementW
idth,
unsigned Class>
1330 if (Kind != k_Register || (
Reg.Kind != RegKind::SVEPredicateAsCounter &&
1331 Reg.Kind != RegKind::SVEPredicateVector))
1332 return DiagnosticPredicateTy::NoMatch;
1334 if ((isSVEPredicateAsCounterReg<Class>() ||
1335 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1336 Reg.ElementWidth == ElementWidth)
1337 return DiagnosticPredicateTy::Match;
1339 return DiagnosticPredicateTy::NearMatch;
1342 template <
int ElementW
idth,
unsigned Class>
1344 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateAsCounter)
1345 return DiagnosticPredicateTy::NoMatch;
1347 if (isSVEPredicateAsCounterReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1348 return DiagnosticPredicateTy::Match;
1350 return DiagnosticPredicateTy::NearMatch;
1353 template <
int ElementW
idth,
unsigned Class>
1355 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1356 return DiagnosticPredicateTy::NoMatch;
1358 if (isSVEVectorReg<Class>() &&
Reg.ElementWidth == ElementWidth)
1359 return DiagnosticPredicateTy::Match;
1361 return DiagnosticPredicateTy::NearMatch;
1364 template <
int ElementWidth,
unsigned Class,
1366 bool ShiftWidthAlwaysSame>
1368 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1369 if (!VectorMatch.isMatch())
1370 return DiagnosticPredicateTy::NoMatch;
1375 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1378 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1379 return DiagnosticPredicateTy::NoMatch;
1381 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1382 return DiagnosticPredicateTy::Match;
1384 return DiagnosticPredicateTy::NearMatch;
1387 bool isGPR32as64()
const {
1388 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1389 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.RegNum);
1392 bool isGPR64as32()
const {
1393 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1394 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.RegNum);
1397 bool isGPR64x8()
const {
1398 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1399 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1403 bool isWSeqPair()
const {
1404 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1405 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1409 bool isXSeqPair()
const {
1410 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1411 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1415 bool isSyspXzrPair()
const {
1416 return isGPR64<AArch64::GPR64RegClassID>() &&
Reg.RegNum == AArch64::XZR;
1419 template<
int64_t Angle,
int64_t Remainder>
1421 if (!
isImm())
return DiagnosticPredicateTy::NoMatch;
1424 if (!CE)
return DiagnosticPredicateTy::NoMatch;
1427 if (
Value % Angle == Remainder &&
Value <= 270)
1428 return DiagnosticPredicateTy::Match;
1429 return DiagnosticPredicateTy::NearMatch;
1432 template <
unsigned RegClassID>
bool isGPR64()
const {
1433 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1434 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
1437 template <
unsigned RegClassID,
int ExtW
idth>
1439 if (Kind != k_Register ||
Reg.Kind != RegKind::Scalar)
1440 return DiagnosticPredicateTy::NoMatch;
1442 if (isGPR64<RegClassID>() && getShiftExtendType() ==
AArch64_AM::LSL &&
1443 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1444 return DiagnosticPredicateTy::Match;
1445 return DiagnosticPredicateTy::NearMatch;
1450 template <RegKind VectorKind,
unsigned NumRegs,
bool IsConsecutive = false>
1451 bool isImplicitlyTypedVectorList()
const {
1452 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1453 VectorList.NumElements == 0 &&
1454 VectorList.RegisterKind == VectorKind &&
1455 (!IsConsecutive || (VectorList.Stride == 1));
1458 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1459 unsigned ElementWidth,
unsigned Stride = 1>
1460 bool isTypedVectorList()
const {
1461 if (Kind != k_VectorList)
1463 if (VectorList.Count != NumRegs)
1465 if (VectorList.RegisterKind != VectorKind)
1467 if (VectorList.ElementWidth != ElementWidth)
1469 if (VectorList.Stride != Stride)
1471 return VectorList.NumElements == NumElements;
1474 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1475 unsigned ElementWidth,
unsigned RegClass>
1478 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1480 return DiagnosticPredicateTy::NoMatch;
1481 if (!AArch64MCRegisterClasses[RegClass].
contains(VectorList.RegNum))
1482 return DiagnosticPredicateTy::NearMatch;
1483 return DiagnosticPredicateTy::Match;
1486 template <RegKind VectorKind,
unsigned NumRegs,
unsigned Stride,
1487 unsigned ElementWidth>
1489 bool Res = isTypedVectorList<VectorKind, NumRegs, 0,
1490 ElementWidth, Stride>();
1492 return DiagnosticPredicateTy::NoMatch;
1493 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1494 ((VectorList.RegNum >= AArch64::Z16) &&
1495 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1496 return DiagnosticPredicateTy::Match;
1497 return DiagnosticPredicateTy::NoMatch;
1500 template <
int Min,
int Max>
1502 if (Kind != k_VectorIndex)
1503 return DiagnosticPredicateTy::NoMatch;
1504 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1505 return DiagnosticPredicateTy::Match;
1506 return DiagnosticPredicateTy::NearMatch;
1509 bool isToken()
const override {
return Kind == k_Token; }
1511 bool isTokenEqual(
StringRef Str)
const {
1512 return Kind == k_Token && getToken() == Str;
1514 bool isSysCR()
const {
return Kind == k_SysCR; }
1515 bool isPrefetch()
const {
return Kind == k_Prefetch; }
1516 bool isPSBHint()
const {
return Kind == k_PSBHint; }
1517 bool isPHint()
const {
return Kind == k_PHint; }
1518 bool isBTIHint()
const {
return Kind == k_BTIHint; }
1519 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1520 bool isShifter()
const {
1521 if (!isShiftExtend())
1531 if (Kind != k_FPImm)
1532 return DiagnosticPredicateTy::NoMatch;
1534 if (getFPImmIsExact()) {
1536 auto *
Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1540 APFloat RealVal(APFloat::IEEEdouble());
1542 RealVal.convertFromString(
Desc->Repr, APFloat::rmTowardZero);
1543 if (
errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1546 if (
getFPImm().bitwiseIsEqual(RealVal))
1547 return DiagnosticPredicateTy::Match;
1550 return DiagnosticPredicateTy::NearMatch;
1553 template <
unsigned ImmA,
unsigned ImmB>
1556 if ((Res = isExactFPImm<ImmA>()))
1557 return DiagnosticPredicateTy::Match;
1558 if ((Res = isExactFPImm<ImmB>()))
1559 return DiagnosticPredicateTy::Match;
1563 bool isExtend()
const {
1564 if (!isShiftExtend())
1573 getShiftExtendAmount() <= 4;
1576 bool isExtend64()
const {
1586 bool isExtendLSL64()
const {
1592 getShiftExtendAmount() <= 4;
1595 bool isLSLImm3Shift()
const {
1596 if (!isShiftExtend())
1602 template<
int W
idth>
bool isMemXExtend()
const {
1607 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1608 getShiftExtendAmount() == 0);
1611 template<
int W
idth>
bool isMemWExtend()
const {
1616 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1617 getShiftExtendAmount() == 0);
1620 template <
unsigned w
idth>
1621 bool isArithmeticShifter()
const {
1631 template <
unsigned w
idth>
1632 bool isLogicalShifter()
const {
1640 getShiftExtendAmount() < width;
1643 bool isMovImm32Shifter()
const {
1651 uint64_t Val = getShiftExtendAmount();
1652 return (Val == 0 || Val == 16);
1655 bool isMovImm64Shifter()
const {
1663 uint64_t Val = getShiftExtendAmount();
1664 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1667 bool isLogicalVecShifter()
const {
1672 unsigned Shift = getShiftExtendAmount();
1674 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1677 bool isLogicalVecHalfWordShifter()
const {
1678 if (!isLogicalVecShifter())
1682 unsigned Shift = getShiftExtendAmount();
1684 (Shift == 0 || Shift == 8);
1687 bool isMoveVecShifter()
const {
1688 if (!isShiftExtend())
1692 unsigned Shift = getShiftExtendAmount();
1694 (Shift == 8 || Shift == 16);
1703 bool isSImm9OffsetFB()
const {
1704 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1707 bool isAdrpLabel()
const {
1714 int64_t Val =
CE->getValue();
1715 int64_t Min = - (4096 * (1LL << (21 - 1)));
1716 int64_t
Max = 4096 * ((1LL << (21 - 1)) - 1);
1717 return (Val % 4096) == 0 && Val >= Min && Val <=
Max;
1723 bool isAdrLabel()
const {
1730 int64_t Val =
CE->getValue();
1731 int64_t Min = - (1LL << (21 - 1));
1732 int64_t
Max = ((1LL << (21 - 1)) - 1);
1733 return Val >= Min && Val <=
Max;
1739 template <MatrixKind Kind,
unsigned EltSize,
unsigned RegClass>
1742 return DiagnosticPredicateTy::NoMatch;
1743 if (getMatrixKind() != Kind ||
1744 !AArch64MCRegisterClasses[RegClass].
contains(getMatrixReg()) ||
1745 EltSize != getMatrixElementWidth())
1746 return DiagnosticPredicateTy::NearMatch;
1747 return DiagnosticPredicateTy::Match;
1750 bool isPAuthPCRelLabel16Operand()
const {
1762 return (Val <= 0) && (Val > -(1 << 18));
1769 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1775 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
1776 assert(
N == 1 &&
"Invalid number of operands!");
1780 void addMatrixOperands(
MCInst &Inst,
unsigned N)
const {
1781 assert(
N == 1 &&
"Invalid number of operands!");
1785 void addGPR32as64Operands(
MCInst &Inst,
unsigned N)
const {
1786 assert(
N == 1 &&
"Invalid number of operands!");
1788 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1797 void addGPR64as32Operands(
MCInst &Inst,
unsigned N)
const {
1798 assert(
N == 1 &&
"Invalid number of operands!");
1800 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
1809 template <
int W
idth>
1810 void addFPRasZPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1813 case 8:
Base = AArch64::B0;
break;
1814 case 16:
Base = AArch64::H0;
break;
1815 case 32:
Base = AArch64::S0;
break;
1816 case 64:
Base = AArch64::D0;
break;
1817 case 128:
Base = AArch64::Q0;
break;
1824 void addPPRorPNRRegOperands(
MCInst &Inst,
unsigned N)
const {
1825 assert(
N == 1 &&
"Invalid number of operands!");
1828 if (
Reg >= AArch64::PN0 &&
Reg <= AArch64::PN15)
1829 Reg =
Reg - AArch64::PN0 + AArch64::P0;
1833 void addPNRasPPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1834 assert(
N == 1 &&
"Invalid number of operands!");
1839 void addVectorReg64Operands(
MCInst &Inst,
unsigned N)
const {
1840 assert(
N == 1 &&
"Invalid number of operands!");
1842 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1846 void addVectorReg128Operands(
MCInst &Inst,
unsigned N)
const {
1847 assert(
N == 1 &&
"Invalid number of operands!");
1849 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1853 void addVectorRegLoOperands(
MCInst &Inst,
unsigned N)
const {
1854 assert(
N == 1 &&
"Invalid number of operands!");
1858 void addVectorReg0to7Operands(
MCInst &Inst,
unsigned N)
const {
1859 assert(
N == 1 &&
"Invalid number of operands!");
1863 enum VecListIndexType {
1864 VecListIdx_DReg = 0,
1865 VecListIdx_QReg = 1,
1866 VecListIdx_ZReg = 2,
1867 VecListIdx_PReg = 3,
1870 template <VecListIndexType RegTy,
unsigned NumRegs,
1871 bool IsConsecutive =
false>
1872 void addVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1873 assert(
N == 1 &&
"Invalid number of operands!");
1874 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1875 "Expected consecutive registers");
1876 static const unsigned FirstRegs[][5] = {
1878 AArch64::D0, AArch64::D0_D1,
1879 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1881 AArch64::Q0, AArch64::Q0_Q1,
1882 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1884 AArch64::Z0, AArch64::Z0_Z1,
1885 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1887 AArch64::P0, AArch64::P0_P1 }
1890 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1891 " NumRegs must be <= 4 for ZRegs");
1893 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1894 " NumRegs must be <= 2 for PRegs");
1896 unsigned FirstReg = FirstRegs[(
unsigned)RegTy][NumRegs];
1898 FirstRegs[(
unsigned)RegTy][0]));
1901 template <
unsigned NumRegs>
1902 void addStridedVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1903 assert(
N == 1 &&
"Invalid number of operands!");
1904 assert((NumRegs == 2 || NumRegs == 4) &&
" NumRegs must be 2 or 4");
1908 if (getVectorListStart() < AArch64::Z16) {
1909 assert((getVectorListStart() < AArch64::Z8) &&
1910 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1912 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1914 assert((getVectorListStart() < AArch64::Z24) &&
1915 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1917 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1921 if (getVectorListStart() < AArch64::Z16) {
1922 assert((getVectorListStart() < AArch64::Z4) &&
1923 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1925 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1927 assert((getVectorListStart() < AArch64::Z20) &&
1928 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1930 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
// NOTE(review): This region is a damaged extraction of the AArch64Operand
// add*Operands() helpers (each converts a parsed operand into MCInst
// operands). Statements are split across lines and the embedded original
// line numbers jump (e.g. 1941 -> 1945), so each method below is missing its
// trailing Inst.addOperand(...) calls and closing braces. Code is left
// byte-identical; only comments were added. Recover the dropped lines from
// the original file before building.
// Adds the ZA tile-list mask; the assert shows the mask fits in 8 bits.
1938 void addMatrixTileListOperands(
MCInst &Inst,
unsigned N)
const {
1939 assert(
N == 1 &&
"Invalid number of operands!");
1940 unsigned RegMask = getMatrixTileListRegMask();
1941 assert(RegMask <= 0xFF &&
"Invalid mask!");
// Adds a vector lane index operand (body truncated: 1946 -> 1950).
1945 void addVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
1946 assert(
N == 1 &&
"Invalid number of operands!");
// Adds an FP immediate that must exactly match one of the two template
// encodings, per the isExactFPImm<> assert.
1950 template <
unsigned ImmIs0,
unsigned ImmIs1>
1951 void addExactFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1952 assert(
N == 1 &&
"Invalid number of operands!");
1953 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
// Adds a plain immediate via addExpr().
1957 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
1958 assert(
N == 1 &&
"Invalid number of operands!");
1962 addExpr(Inst, getImm());
// Adds an immediate plus an optional shift (N == 2: value + shift amount).
// Three visible cases: pre-shifted value, explicit shifted-imm, bare imm.
1965 template <
int Shift>
1966 void addImmWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1967 assert(
N == 2 &&
"Invalid number of operands!");
1968 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1971 }
else if (isShiftedImm()) {
1972 addExpr(Inst, getShiftedImmVal());
1975 addExpr(Inst, getImm());
// Negated variant of the above (body truncated after the getShiftedVal test).
1980 template <
int Shift>
1981 void addImmNegWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1982 assert(
N == 2 &&
"Invalid number of operands!");
1983 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
// Adds a condition-code operand (body truncated).
1990 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
1991 assert(
N == 1 &&
"Invalid number of operands!");
// ADRP label: falls through to addExpr on the immediate in the visible path.
1995 void addAdrpLabelOperands(
MCInst &Inst,
unsigned N)
const {
1996 assert(
N == 1 &&
"Invalid number of operands!");
1999 addExpr(Inst, getImm());
// ADR label simply delegates to addImmOperands.
2004 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
2005 addImmOperands(Inst,
N);
// Scaled unsigned 12-bit load/store offset (body truncated: 2010 -> 2020).
2009 void addUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
2010 assert(
N == 1 &&
"Invalid number of operands!");
2020 void addUImm6Operands(
MCInst &Inst,
unsigned N)
const {
2021 assert(
N == 1 &&
"Invalid number of operands!");
// Immediates scaled by a compile-time factor (bodies truncated).
2026 template <
int Scale>
2027 void addImmScaledOperands(
MCInst &Inst,
unsigned N)
const {
2028 assert(
N == 1 &&
"Invalid number of operands!");
2033 template <
int Scale>
2034 void addImmScaledRangeOperands(
MCInst &Inst,
unsigned N)
const {
2035 assert(
N == 1 &&
"Invalid number of operands!");
// Logical immediates: widened to the unsigned counterpart of T before
// (presumably) being re-encoded — the encode step was dropped by extraction.
2039 template <
typename T>
2040 void addLogicalImmOperands(
MCInst &Inst,
unsigned N)
const {
2041 assert(
N == 1 &&
"Invalid number of operands!");
2043 std::make_unsigned_t<T> Val = MCE->
getValue();
// Same as above but with the immediate bitwise-inverted first.
2048 template <
typename T>
2049 void addLogicalImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2050 assert(
N == 1 &&
"Invalid number of operands!");
2052 std::make_unsigned_t<T> Val = ~MCE->getValue();
// NOTE(review): Damaged extraction of more AArch64Operand add*Operands()
// helpers (system registers, branch targets, barriers, hints, shifts,
// extends, MOV aliases). Embedded original line numbers jump throughout
// (e.g. 2058 -> 2064), so every method is missing interior statements and
// closing braces. Code left byte-identical; comments only.
2057 void addSIMDImmType10Operands(
MCInst &Inst,
unsigned N)
const {
2058 assert(
N == 1 &&
"Invalid number of operands!");
// Branch/label adders share a visible pattern: non-constant expressions go
// through addExpr (fixup/relocation path), constants assert via the MCE
// check and are (presumably) scaled — the scaling lines were dropped.
2064 void addBranchTarget26Operands(
MCInst &Inst,
unsigned N)
const {
2068 assert(
N == 1 &&
"Invalid number of operands!");
2071 addExpr(Inst, getImm());
2074 assert(MCE &&
"Invalid constant immediate operand!");
2078 void addPAuthPCRelLabel16Operands(
MCInst &Inst,
unsigned N)
const {
2082 assert(
N == 1 &&
"Invalid number of operands!");
2085 addExpr(Inst, getImm());
2091 void addPCRelLabel19Operands(
MCInst &Inst,
unsigned N)
const {
2095 assert(
N == 1 &&
"Invalid number of operands!");
2098 addExpr(Inst, getImm());
2101 assert(MCE &&
"Invalid constant immediate operand!");
2105 void addPCRelLabel9Operands(
MCInst &Inst,
unsigned N)
const {
2109 assert(
N == 1 &&
"Invalid number of operands!");
2112 addExpr(Inst, getImm());
2115 assert(MCE &&
"Invalid constant immediate operand!");
2119 void addBranchTarget14Operands(
MCInst &Inst,
unsigned N)
const {
2123 assert(
N == 1 &&
"Invalid number of operands!");
2126 addExpr(Inst, getImm());
2129 assert(MCE &&
"Invalid constant immediate operand!");
2133 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2134 assert(
N == 1 &&
"Invalid number of operands!");
// Barrier / system-register / hint adders: bodies all truncated after the
// operand-count assert.
2139 void addBarrierOperands(
MCInst &Inst,
unsigned N)
const {
2140 assert(
N == 1 &&
"Invalid number of operands!");
2144 void addBarriernXSOperands(
MCInst &Inst,
unsigned N)
const {
2145 assert(
N == 1 &&
"Invalid number of operands!");
2149 void addMRSSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2150 assert(
N == 1 &&
"Invalid number of operands!");
2155 void addMSRSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2156 assert(
N == 1 &&
"Invalid number of operands!");
2161 void addSystemPStateFieldWithImm0_1Operands(
MCInst &Inst,
unsigned N)
const {
2162 assert(
N == 1 &&
"Invalid number of operands!");
2167 void addSVCROperands(
MCInst &Inst,
unsigned N)
const {
2168 assert(
N == 1 &&
"Invalid number of operands!");
2173 void addSystemPStateFieldWithImm0_15Operands(
MCInst &Inst,
unsigned N)
const {
2174 assert(
N == 1 &&
"Invalid number of operands!");
2179 void addSysCROperands(
MCInst &Inst,
unsigned N)
const {
2180 assert(
N == 1 &&
"Invalid number of operands!");
2184 void addPrefetchOperands(
MCInst &Inst,
unsigned N)
const {
2185 assert(
N == 1 &&
"Invalid number of operands!");
2189 void addPSBHintOperands(
MCInst &Inst,
unsigned N)
const {
2190 assert(
N == 1 &&
"Invalid number of operands!");
2194 void addPHintOperands(
MCInst &Inst,
unsigned N)
const {
2195 assert(
N == 1 &&
"Invalid number of operands!");
2199 void addBTIHintOperands(
MCInst &Inst,
unsigned N)
const {
2200 assert(
N == 1 &&
"Invalid number of operands!");
2204 void addShifterOperands(
MCInst &Inst,
unsigned N)
const {
2205 assert(
N == 1 &&
"Invalid number of operands!");
2211 void addLSLImm3ShifterOperands(
MCInst &Inst,
unsigned N)
const {
2212 assert(
N == 1 &&
"Invalid number of operands!");
2213 unsigned Imm = getShiftExtendAmount();
// SYSP XZR-pair: visible special case checks whether the register is XZR.
2217 void addSyspXzrPairOperand(
MCInst &Inst,
unsigned N)
const {
2218 assert(
N == 1 &&
"Invalid number of operands!");
2226 if (
Reg != AArch64::XZR)
2232 void addExtendOperands(
MCInst &Inst,
unsigned N)
const {
2233 assert(
N == 1 &&
"Invalid number of operands!");
2240 void addExtend64Operands(
MCInst &Inst,
unsigned N)
const {
2241 assert(
N == 1 &&
"Invalid number of operands!");
// Memory extends produce two operands (extend kind + shift), hence N == 2.
2248 void addMemExtendOperands(
MCInst &Inst,
unsigned N)
const {
2249 assert(
N == 2 &&
"Invalid number of operands!");
2260 void addMemExtend8Operands(
MCInst &Inst,
unsigned N)
const {
2261 assert(
N == 2 &&
"Invalid number of operands!");
// MOVZ/MOVN mov-alias immediates: visible fallback is addExpr on the imm.
2269 void addMOVZMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2270 assert(
N == 1 &&
"Invalid number of operands!");
2277 addExpr(Inst, getImm());
2282 void addMOVNMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2283 assert(
N == 1 &&
"Invalid number of operands!");
2290 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
2291 assert(
N == 1 &&
"Invalid number of operands!");
2296 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
2297 assert(
N == 1 &&
"Invalid number of operands!");
// NOTE(review): Damaged extraction of the static AArch64Operand Create*
// factory methods. Each builds an operand of a given kind with
// std::make_unique and fills the matching union member; the signature lines,
// S/E (start/end SMLoc) assignments and `return Op;` statements were mostly
// dropped by the extraction (numbering jumps, e.g. 2309 -> 2315). Code left
// byte-identical; comments only.
2304 static std::unique_ptr<AArch64Operand>
2306 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2307 Op->Tok.Data = Str.data();
2308 Op->Tok.Length = Str.size();
2309 Op->Tok.IsSuffix = IsSuffix;
// CreateReg: note default EqTy/ShiftAmount/HasExplicitAmount parameters, and
// that ElementWidth is zeroed for scalar registers.
2315 static std::unique_ptr<AArch64Operand>
2317 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2319 unsigned ShiftAmount = 0,
2320 unsigned HasExplicitAmount =
false) {
2321 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2322 Op->Reg.RegNum = RegNum;
2324 Op->Reg.ElementWidth = 0;
2325 Op->Reg.EqualityTy = EqTy;
2326 Op->Reg.ShiftExtend.Type = ExtTy;
2327 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2328 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
// CreateVectorReg: delegates to CreateReg, then overrides ElementWidth.
// The assert restricts Kind to the four vector register kinds.
2334 static std::unique_ptr<AArch64Operand>
2335 CreateVectorReg(
unsigned RegNum, RegKind Kind,
unsigned ElementWidth,
2338 unsigned ShiftAmount = 0,
2339 unsigned HasExplicitAmount =
false) {
2340 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2341 Kind == RegKind::SVEPredicateVector ||
2342 Kind == RegKind::SVEPredicateAsCounter) &&
2343 "Invalid vector kind");
2344 auto Op = CreateReg(RegNum, Kind, S,
E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2346 Op->Reg.ElementWidth = ElementWidth;
2350 static std::unique_ptr<AArch64Operand>
2351 CreateVectorList(
unsigned RegNum,
unsigned Count,
unsigned Stride,
2352 unsigned NumElements,
unsigned ElementWidth,
2354 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2355 Op->VectorList.RegNum = RegNum;
2356 Op->VectorList.Count = Count;
2357 Op->VectorList.Stride = Stride;
2358 Op->VectorList.NumElements = NumElements;
2359 Op->VectorList.ElementWidth = ElementWidth;
2360 Op->VectorList.RegisterKind = RegisterKind;
2366 static std::unique_ptr<AArch64Operand>
2368 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2369 Op->VectorIndex.Val =
Idx;
2375 static std::unique_ptr<AArch64Operand>
2377 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2378 Op->MatrixTileList.RegMask = RegMask;
// Static lookup table mapping (element width, tile register) to the set of
// 64-bit ZAD tiles it occupies; width-0 and width-8 both map ZAB0 to all
// eight ZAD tiles. 64-bit tiles short-circuit (the visible `if` at 2404).
2385 const unsigned ElementWidth) {
2386 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2388 {{0, AArch64::ZAB0},
2389 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2390 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2391 {{8, AArch64::ZAB0},
2392 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2393 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2394 {{16, AArch64::ZAH0},
2395 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2396 {{16, AArch64::ZAH1},
2397 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2398 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2399 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2400 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2401 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2404 if (ElementWidth == 64)
2407 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth,
Reg)];
2408 assert(!Regs.empty() &&
"Invalid tile or element width!");
2409 for (
auto OutReg : Regs)
2414 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val,
SMLoc S,
2416 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2423 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
2424 unsigned ShiftAmount,
2427 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2428 Op->ShiftedImm .Val = Val;
2429 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2435 static std::unique_ptr<AArch64Operand> CreateImmRange(
unsigned First,
2439 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2441 Op->ImmRange.Last =
Last;
2446 static std::unique_ptr<AArch64Operand>
2448 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2449 Op->CondCode.Code =
Code;
2455 static std::unique_ptr<AArch64Operand>
2457 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2459 Op->FPImm.IsExact = IsExact;
2465 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
2469 bool HasnXSModifier) {
2470 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2471 Op->Barrier.Val = Val;
2472 Op->Barrier.Data = Str.data();
2473 Op->Barrier.Length = Str.size();
2474 Op->Barrier.HasnXSModifier = HasnXSModifier;
2480 static std::unique_ptr<AArch64Operand> CreateSysReg(
StringRef Str,
SMLoc S,
2485 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2486 Op->SysReg.Data = Str.data();
2487 Op->SysReg.Length = Str.size();
2488 Op->SysReg.MRSReg = MRSReg;
2489 Op->SysReg.MSRReg = MSRReg;
2490 Op->SysReg.PStateField = PStateField;
2496 static std::unique_ptr<AArch64Operand>
2498 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2499 Op->PHint.Val = Val;
2500 Op->PHint.Data = Str.data();
2501 Op->PHint.Length = Str.size();
2507 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val,
SMLoc S,
2509 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2510 Op->SysCRImm.Val = Val;
// NOTE(review): CreatePrefetch writes Barrier.Data/Length on a k_Prefetch
// operand — Prefetch and Barrier are presumably union members sharing layout,
// but this looks suspicious; confirm against the full struct definitions.
2516 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
2520 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2521 Op->Prefetch.Val = Val;
2522 Op->Barrier.Data = Str.data();
2523 Op->Barrier.Length = Str.size();
2529 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
2533 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2534 Op->PSBHint.Val = Val;
2535 Op->PSBHint.Data = Str.data();
2536 Op->PSBHint.Length = Str.size();
// NOTE(review): BTIHint.Val is stored with bit 5 forced on (Val | 32) —
// intent not visible here; confirm against the BTI hint encoding.
2542 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
2546 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2547 Op->BTIHint.Val = Val | 32;
2548 Op->BTIHint.Data = Str.data();
2549 Op->BTIHint.Length = Str.size();
2555 static std::unique_ptr<AArch64Operand>
2556 CreateMatrixRegister(
unsigned RegNum,
unsigned ElementWidth, MatrixKind Kind,
2558 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2559 Op->MatrixReg.RegNum = RegNum;
2560 Op->MatrixReg.ElementWidth = ElementWidth;
2561 Op->MatrixReg.Kind =
Kind;
2567 static std::unique_ptr<AArch64Operand>
2569 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2570 Op->SVCR.PStateField = PStateField;
2571 Op->SVCR.Data = Str.data();
2572 Op->SVCR.Length = Str.size();
2578 static std::unique_ptr<AArch64Operand>
2581 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2582 Op->ShiftExtend.Type = ShOp;
2583 Op->ShiftExtend.Amount = Val;
2584 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
// NOTE(review): Damaged extraction of AArch64Operand::print(raw_ostream&) —
// the per-kind debug-dump switch. The switch header and many `break`/case
// labels were dropped (numbering jumps, e.g. 2597 -> 2604). Code left
// byte-identical; comments only.
2596 OS <<
"<fpimm " <<
getFPImm().bitcastToAPInt().getZExtValue();
2597 if (!getFPImmIsExact())
2604 OS <<
"<barrier " <<
Name <<
">";
2606 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2612 case k_ShiftedImm: {
2613 unsigned Shift = getShiftedImmShift();
2614 OS <<
"<shiftedimm ";
2615 OS << *getShiftedImmVal();
2621 OS << getFirstImmVal();
2622 OS <<
":" << getLastImmVal() <<
">";
// Vector lists print each register, stepping by the list's stride.
2628 case k_VectorList: {
2629 OS <<
"<vectorlist ";
2630 unsigned Reg = getVectorListStart();
2631 for (
unsigned i = 0, e = getVectorListCount(); i !=
e; ++i)
2632 OS <<
Reg + i * getVectorListStride() <<
" ";
2637 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2640 OS <<
"<sysreg: " << getSysReg() <<
'>';
2643 OS <<
"'" << getToken() <<
"'";
2646 OS <<
"c" << getSysCR();
2651 OS <<
"<prfop " <<
Name <<
">";
2653 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2657 OS << getPSBHintName();
2660 OS << getPHintName();
2663 OS << getBTIHintName();
2665 case k_MatrixRegister:
2666 OS <<
"<matrix " << getMatrixReg() <<
">";
// Tile list prints the 8-bit mask MSB-first, one bit per character.
2668 case k_MatrixTileList: {
2669 OS <<
"<matrixlist ";
2670 unsigned RegMask = getMatrixTileListRegMask();
2671 unsigned MaxBits = 8;
2672 for (
unsigned I = MaxBits;
I > 0; --
I)
2673 OS << ((RegMask & (1 << (
I - 1))) >> (
I - 1));
2682 OS <<
"<register " <<
getReg() <<
">";
2683 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2688 << getShiftExtendAmount();
2689 if (!hasShiftExtendAmount())
2705 .
Case(
"v0", AArch64::Q0)
2706 .
Case(
"v1", AArch64::Q1)
2707 .
Case(
"v2", AArch64::Q2)
2708 .
Case(
"v3", AArch64::Q3)
2709 .
Case(
"v4", AArch64::Q4)
2710 .
Case(
"v5", AArch64::Q5)
2711 .
Case(
"v6", AArch64::Q6)
2712 .
Case(
"v7", AArch64::Q7)
2713 .
Case(
"v8", AArch64::Q8)
2714 .
Case(
"v9", AArch64::Q9)
2715 .
Case(
"v10", AArch64::Q10)
2716 .
Case(
"v11", AArch64::Q11)
2717 .
Case(
"v12", AArch64::Q12)
2718 .
Case(
"v13", AArch64::Q13)
2719 .
Case(
"v14", AArch64::Q14)
2720 .
Case(
"v15", AArch64::Q15)
2721 .
Case(
"v16", AArch64::Q16)
2722 .
Case(
"v17", AArch64::Q17)
2723 .
Case(
"v18", AArch64::Q18)
2724 .
Case(
"v19", AArch64::Q19)
2725 .
Case(
"v20", AArch64::Q20)
2726 .
Case(
"v21", AArch64::Q21)
2727 .
Case(
"v22", AArch64::Q22)
2728 .
Case(
"v23", AArch64::Q23)
2729 .
Case(
"v24", AArch64::Q24)
2730 .
Case(
"v25", AArch64::Q25)
2731 .
Case(
"v26", AArch64::Q26)
2732 .
Case(
"v27", AArch64::Q27)
2733 .
Case(
"v28", AArch64::Q28)
2734 .
Case(
"v29", AArch64::Q29)
2735 .
Case(
"v30", AArch64::Q30)
2736 .
Case(
"v31", AArch64::Q31)
// NOTE(review): Damaged extraction of the vector-kind suffix parser
// (parseVectorKind): maps a ".<count><lanewidth>" suffix to a
// {NumElements, ElementWidth} pair per register kind; {-1,-1} means no
// match and yields std::nullopt. The StringSwitch headers/Default lines
// were dropped by extraction. Code left byte-identical; comments only.
2745 RegKind VectorKind) {
2746 std::pair<int, int> Res = {-1, -1};
2748 switch (VectorKind) {
// NEON: explicit element counts (".2s", ".16b", ...); count 0 means an
// unspecified number of lanes (".h"/".s"/".d" forms below).
2749 case RegKind::NeonVector:
2752 .Case(
".1d", {1, 64})
2753 .Case(
".1q", {1, 128})
2755 .Case(
".2h", {2, 16})
2756 .Case(
".2b", {2, 8})
2757 .Case(
".2s", {2, 32})
2758 .Case(
".2d", {2, 64})
2761 .Case(
".4b", {4, 8})
2762 .Case(
".4h", {4, 16})
2763 .Case(
".4s", {4, 32})
2764 .Case(
".8b", {8, 8})
2765 .Case(
".8h", {8, 16})
2766 .Case(
".16b", {16, 8})
2771 .Case(
".h", {0, 16})
2772 .Case(
".s", {0, 32})
2773 .Case(
".d", {0, 64})
// SVE/SME kinds: only the element width is encoded (count always 0).
2776 case RegKind::SVEPredicateAsCounter:
2777 case RegKind::SVEPredicateVector:
2778 case RegKind::SVEDataVector:
2779 case RegKind::Matrix:
2783 .Case(
".h", {0, 16})
2784 .Case(
".s", {0, 32})
2785 .Case(
".d", {0, 64})
2786 .Case(
".q", {0, 128})
2793 if (Res == std::make_pair(-1, -1))
2794 return std::nullopt;
2796 return std::optional<std::pair<int, int>>(Res);
2805 .
Case(
"z0", AArch64::Z0)
2806 .
Case(
"z1", AArch64::Z1)
2807 .
Case(
"z2", AArch64::Z2)
2808 .
Case(
"z3", AArch64::Z3)
2809 .
Case(
"z4", AArch64::Z4)
2810 .
Case(
"z5", AArch64::Z5)
2811 .
Case(
"z6", AArch64::Z6)
2812 .
Case(
"z7", AArch64::Z7)
2813 .
Case(
"z8", AArch64::Z8)
2814 .
Case(
"z9", AArch64::Z9)
2815 .
Case(
"z10", AArch64::Z10)
2816 .
Case(
"z11", AArch64::Z11)
2817 .
Case(
"z12", AArch64::Z12)
2818 .
Case(
"z13", AArch64::Z13)
2819 .
Case(
"z14", AArch64::Z14)
2820 .
Case(
"z15", AArch64::Z15)
2821 .
Case(
"z16", AArch64::Z16)
2822 .
Case(
"z17", AArch64::Z17)
2823 .
Case(
"z18", AArch64::Z18)
2824 .
Case(
"z19", AArch64::Z19)
2825 .
Case(
"z20", AArch64::Z20)
2826 .
Case(
"z21", AArch64::Z21)
2827 .
Case(
"z22", AArch64::Z22)
2828 .
Case(
"z23", AArch64::Z23)
2829 .
Case(
"z24", AArch64::Z24)
2830 .
Case(
"z25", AArch64::Z25)
2831 .
Case(
"z26", AArch64::Z26)
2832 .
Case(
"z27", AArch64::Z27)
2833 .
Case(
"z28", AArch64::Z28)
2834 .
Case(
"z29", AArch64::Z29)
2835 .
Case(
"z30", AArch64::Z30)
2836 .
Case(
"z31", AArch64::Z31)
2842 .
Case(
"p0", AArch64::P0)
2843 .
Case(
"p1", AArch64::P1)
2844 .
Case(
"p2", AArch64::P2)
2845 .
Case(
"p3", AArch64::P3)
2846 .
Case(
"p4", AArch64::P4)
2847 .
Case(
"p5", AArch64::P5)
2848 .
Case(
"p6", AArch64::P6)
2849 .
Case(
"p7", AArch64::P7)
2850 .
Case(
"p8", AArch64::P8)
2851 .
Case(
"p9", AArch64::P9)
2852 .
Case(
"p10", AArch64::P10)
2853 .
Case(
"p11", AArch64::P11)
2854 .
Case(
"p12", AArch64::P12)
2855 .
Case(
"p13", AArch64::P13)
2856 .
Case(
"p14", AArch64::P14)
2857 .
Case(
"p15", AArch64::P15)
2863 .
Case(
"pn0", AArch64::PN0)
2864 .
Case(
"pn1", AArch64::PN1)
2865 .
Case(
"pn2", AArch64::PN2)
2866 .
Case(
"pn3", AArch64::PN3)
2867 .
Case(
"pn4", AArch64::PN4)
2868 .
Case(
"pn5", AArch64::PN5)
2869 .
Case(
"pn6", AArch64::PN6)
2870 .
Case(
"pn7", AArch64::PN7)
2871 .
Case(
"pn8", AArch64::PN8)
2872 .
Case(
"pn9", AArch64::PN9)
2873 .
Case(
"pn10", AArch64::PN10)
2874 .
Case(
"pn11", AArch64::PN11)
2875 .
Case(
"pn12", AArch64::PN12)
2876 .
Case(
"pn13", AArch64::PN13)
2877 .
Case(
"pn14", AArch64::PN14)
2878 .
Case(
"pn15", AArch64::PN15)
2884 .
Case(
"za0.d", AArch64::ZAD0)
2885 .
Case(
"za1.d", AArch64::ZAD1)
2886 .
Case(
"za2.d", AArch64::ZAD2)
2887 .
Case(
"za3.d", AArch64::ZAD3)
2888 .
Case(
"za4.d", AArch64::ZAD4)
2889 .
Case(
"za5.d", AArch64::ZAD5)
2890 .
Case(
"za6.d", AArch64::ZAD6)
2891 .
Case(
"za7.d", AArch64::ZAD7)
2892 .
Case(
"za0.s", AArch64::ZAS0)
2893 .
Case(
"za1.s", AArch64::ZAS1)
2894 .
Case(
"za2.s", AArch64::ZAS2)
2895 .
Case(
"za3.s", AArch64::ZAS3)
2896 .
Case(
"za0.h", AArch64::ZAH0)
2897 .
Case(
"za1.h", AArch64::ZAH1)
2898 .
Case(
"za0.b", AArch64::ZAB0)
2904 .
Case(
"za", AArch64::ZA)
2905 .
Case(
"za0.q", AArch64::ZAQ0)
2906 .
Case(
"za1.q", AArch64::ZAQ1)
2907 .
Case(
"za2.q", AArch64::ZAQ2)
2908 .
Case(
"za3.q", AArch64::ZAQ3)
2909 .
Case(
"za4.q", AArch64::ZAQ4)
2910 .
Case(
"za5.q", AArch64::ZAQ5)
2911 .
Case(
"za6.q", AArch64::ZAQ6)
2912 .
Case(
"za7.q", AArch64::ZAQ7)
2913 .
Case(
"za8.q", AArch64::ZAQ8)
2914 .
Case(
"za9.q", AArch64::ZAQ9)
2915 .
Case(
"za10.q", AArch64::ZAQ10)
2916 .
Case(
"za11.q", AArch64::ZAQ11)
2917 .
Case(
"za12.q", AArch64::ZAQ12)
2918 .
Case(
"za13.q", AArch64::ZAQ13)
2919 .
Case(
"za14.q", AArch64::ZAQ14)
2920 .
Case(
"za15.q", AArch64::ZAQ15)
2921 .
Case(
"za0.d", AArch64::ZAD0)
2922 .
Case(
"za1.d", AArch64::ZAD1)
2923 .
Case(
"za2.d", AArch64::ZAD2)
2924 .
Case(
"za3.d", AArch64::ZAD3)
2925 .
Case(
"za4.d", AArch64::ZAD4)
2926 .
Case(
"za5.d", AArch64::ZAD5)
2927 .
Case(
"za6.d", AArch64::ZAD6)
2928 .
Case(
"za7.d", AArch64::ZAD7)
2929 .
Case(
"za0.s", AArch64::ZAS0)
2930 .
Case(
"za1.s", AArch64::ZAS1)
2931 .
Case(
"za2.s", AArch64::ZAS2)
2932 .
Case(
"za3.s", AArch64::ZAS3)
2933 .
Case(
"za0.h", AArch64::ZAH0)
2934 .
Case(
"za1.h", AArch64::ZAH1)
2935 .
Case(
"za0.b", AArch64::ZAB0)
2936 .
Case(
"za0h.q", AArch64::ZAQ0)
2937 .
Case(
"za1h.q", AArch64::ZAQ1)
2938 .
Case(
"za2h.q", AArch64::ZAQ2)
2939 .
Case(
"za3h.q", AArch64::ZAQ3)
2940 .
Case(
"za4h.q", AArch64::ZAQ4)
2941 .
Case(
"za5h.q", AArch64::ZAQ5)
2942 .
Case(
"za6h.q", AArch64::ZAQ6)
2943 .
Case(
"za7h.q", AArch64::ZAQ7)
2944 .
Case(
"za8h.q", AArch64::ZAQ8)
2945 .
Case(
"za9h.q", AArch64::ZAQ9)
2946 .
Case(
"za10h.q", AArch64::ZAQ10)
2947 .
Case(
"za11h.q", AArch64::ZAQ11)
2948 .
Case(
"za12h.q", AArch64::ZAQ12)
2949 .
Case(
"za13h.q", AArch64::ZAQ13)
2950 .
Case(
"za14h.q", AArch64::ZAQ14)
2951 .
Case(
"za15h.q", AArch64::ZAQ15)
2952 .
Case(
"za0h.d", AArch64::ZAD0)
2953 .
Case(
"za1h.d", AArch64::ZAD1)
2954 .
Case(
"za2h.d", AArch64::ZAD2)
2955 .
Case(
"za3h.d", AArch64::ZAD3)
2956 .
Case(
"za4h.d", AArch64::ZAD4)
2957 .
Case(
"za5h.d", AArch64::ZAD5)
2958 .
Case(
"za6h.d", AArch64::ZAD6)
2959 .
Case(
"za7h.d", AArch64::ZAD7)
2960 .
Case(
"za0h.s", AArch64::ZAS0)
2961 .
Case(
"za1h.s", AArch64::ZAS1)
2962 .
Case(
"za2h.s", AArch64::ZAS2)
2963 .
Case(
"za3h.s", AArch64::ZAS3)
2964 .
Case(
"za0h.h", AArch64::ZAH0)
2965 .
Case(
"za1h.h", AArch64::ZAH1)
2966 .
Case(
"za0h.b", AArch64::ZAB0)
2967 .
Case(
"za0v.q", AArch64::ZAQ0)
2968 .
Case(
"za1v.q", AArch64::ZAQ1)
2969 .
Case(
"za2v.q", AArch64::ZAQ2)
2970 .
Case(
"za3v.q", AArch64::ZAQ3)
2971 .
Case(
"za4v.q", AArch64::ZAQ4)
2972 .
Case(
"za5v.q", AArch64::ZAQ5)
2973 .
Case(
"za6v.q", AArch64::ZAQ6)
2974 .
Case(
"za7v.q", AArch64::ZAQ7)
2975 .
Case(
"za8v.q", AArch64::ZAQ8)
2976 .
Case(
"za9v.q", AArch64::ZAQ9)
2977 .
Case(
"za10v.q", AArch64::ZAQ10)
2978 .
Case(
"za11v.q", AArch64::ZAQ11)
2979 .
Case(
"za12v.q", AArch64::ZAQ12)
2980 .
Case(
"za13v.q", AArch64::ZAQ13)
2981 .
Case(
"za14v.q", AArch64::ZAQ14)
2982 .
Case(
"za15v.q", AArch64::ZAQ15)
2983 .
Case(
"za0v.d", AArch64::ZAD0)
2984 .
Case(
"za1v.d", AArch64::ZAD1)
2985 .
Case(
"za2v.d", AArch64::ZAD2)
2986 .
Case(
"za3v.d", AArch64::ZAD3)
2987 .
Case(
"za4v.d", AArch64::ZAD4)
2988 .
Case(
"za5v.d", AArch64::ZAD5)
2989 .
Case(
"za6v.d", AArch64::ZAD6)
2990 .
Case(
"za7v.d", AArch64::ZAD7)
2991 .
Case(
"za0v.s", AArch64::ZAS0)
2992 .
Case(
"za1v.s", AArch64::ZAS1)
2993 .
Case(
"za2v.s", AArch64::ZAS2)
2994 .
Case(
"za3v.s", AArch64::ZAS3)
2995 .
Case(
"za0v.h", AArch64::ZAH0)
2996 .
Case(
"za1v.h", AArch64::ZAH1)
2997 .
Case(
"za0v.b", AArch64::ZAB0)
// NOTE(review): Damaged extraction of parseRegister (delegates to
// tryParseRegister) and AArch64AsmParser::matchRegisterNameAlias. The
// latter tries each register-kind matcher in turn, returning the number
// only when it matches the requested Kind, then falls back to scalar
// aliases ("fp"/"lr"/"x31"/"w31") and user-defined .req aliases in
// RegisterReqs. Many interior lines were dropped by extraction
// (numbering jumps, e.g. 3008 -> 3015). Code left byte-identical;
// comments only.
3003 return !tryParseRegister(
Reg, StartLoc, EndLoc).isSuccess();
3008 StartLoc = getLoc();
3015unsigned AArch64AsmParser::matchRegisterNameAlias(
StringRef Name,
3017 unsigned RegNum = 0;
3019 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3022 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3025 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3028 return Kind == RegKind::NeonVector ? RegNum : 0;
3031 return Kind == RegKind::Matrix ? RegNum : 0;
// "zt0" (case-insensitive) is only valid as a lookup-table register.
3033 if (
Name.equals_insensitive(
"zt0"))
3034 return Kind == RegKind::LookupTable ?
unsigned(AArch64::ZT0) : 0;
3038 return (Kind == RegKind::Scalar) ? RegNum : 0;
3043 .
Case(
"fp", AArch64::FP)
3044 .
Case(
"lr", AArch64::LR)
3045 .
Case(
"x31", AArch64::XZR)
3046 .
Case(
"w31", AArch64::WZR)
3048 return Kind == RegKind::Scalar ? RegNum : 0;
// .req alias lookup: only honored when the recorded kind matches.
3054 if (Entry == RegisterReqs.
end())
3058 if (Kind ==
Entry->getValue().first)
3059 RegNum =
Entry->getValue().second;
// NOTE(review): Damaged extraction of getNumRegsForRegKind — a switch over
// RegKind whose return statements were dropped by extraction (only the case
// labels survive). Code left byte-identical; comments only.
3064unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3066 case RegKind::Scalar:
3067 case RegKind::NeonVector:
3068 case RegKind::SVEDataVector:
3070 case RegKind::Matrix:
3071 case RegKind::SVEPredicateVector:
3072 case RegKind::SVEPredicateAsCounter:
3074 case RegKind::LookupTable:
// NOTE(review): Damaged extraction covering three parser fragments: a scalar
// register match, the SysCR operand parser ("cN", 0 <= N <= 15), and an
// RPRFM prefetch-operand parser (immediate path with range check, then the
// named-hint path). Interior lines were dropped by extraction. Code left
// byte-identical; comments only.
3089 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
// SysCR: token must start with 'c'/'C' and parse to a number in [0,15].
3103 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3106 if (Tok[0] !=
'c' && Tok[0] !=
'C')
3107 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3111 if (BadNum || CRNum > 15)
3112 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3116 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
// RPRFM prefetch: immediate form is range-checked against MaxVal (63), then
// looked up by encoding so the operand carries its mnemonic name if known.
3125 unsigned MaxVal = 63;
3131 if (getParser().parseExpression(ImmVal))
3136 return TokError(
"immediate value expected for prefetch operand");
3139 return TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
3142 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->
getValue());
3143 Operands.push_back(AArch64Operand::CreatePrefetch(
3144 prfop, RPRFM ? RPRFM->Name :
"", S, getContext()));
// Named form: the identifier must resolve via lookupRPRFMByName.
3149 return TokError(
"prefetch hint expected");
3151 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.
getString());
3153 return TokError(
"prefetch hint expected");
3155 Operands.push_back(AArch64Operand::CreatePrefetch(
3156 RPRFM->Encoding, Tok.
getString(), S, getContext()));
// NOTE(review): Damaged extraction of the templated prefetch-operand parser.
// IsSVEPrefetch selects between the SVEPRFM and PRFM lookup tables (both by
// name and by encoding) and the maximum immediate (15 vs 31). Interior lines
// were dropped by extraction. Code left byte-identical; comments only.
3162template <
bool IsSVEPrefetch>
// Name -> encoding lookup, dispatched on the template parameter.
3168 if (IsSVEPrefetch) {
3169 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
3170 return std::optional<unsigned>(Res->Encoding);
3171 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
3172 return std::optional<unsigned>(Res->Encoding);
3173 return std::optional<unsigned>();
// Encoding -> name lookup (used to attach a mnemonic to immediate forms).
3176 auto LookupByEncoding = [](
unsigned E) {
3177 if (IsSVEPrefetch) {
3178 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
3179 return std::optional<StringRef>(Res->Name);
3180 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
3181 return std::optional<StringRef>(Res->Name);
3182 return std::optional<StringRef>();
3184 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
// Immediate form: parse, require a constant, range-check against MaxVal.
3191 if (getParser().parseExpression(ImmVal))
3196 return TokError(
"immediate value expected for prefetch operand");
3199 return TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
3202 auto PRFM = LookupByEncoding(MCE->
getValue());
3203 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(
""),
// Named form: identifier must resolve via LookupByName.
3209 return TokError(
"prefetch hint expected");
3211 auto PRFM = LookupByName(Tok.
getString());
3213 return TokError(
"prefetch hint expected");
3215 Operands.push_back(AArch64Operand::CreatePrefetch(
3216 *PRFM, Tok.
getString(), S, getContext()));
// NOTE(review): Damaged extraction covering three parser fragments: PSB
// hint parsing (lookupPSBByName), the SYSP XZR-pair operand parser (which
// accepts "xzr" or "xzr, xzr" and rewinds via UnLex for non-XZR registers),
// and BTI hint parsing (lookupBTIByName). Interior lines were dropped by
// extraction. Code left byte-identical; comments only.
3226 return TokError(
"invalid operand for instruction");
3228 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
3230 return TokError(
"invalid operand for instruction");
3232 Operands.push_back(AArch64Operand::CreatePSBHint(
3233 PSB->Encoding, Tok.
getString(), S, getContext()));
// SYSP XZR pair: a non-XZR first register is pushed back to the lexer; a
// second register, if present, must also be XZR.
3239 SMLoc StartLoc = getLoc();
3245 auto RegTok = getTok();
3246 if (!tryParseScalarRegister(RegNum).isSuccess())
3249 if (RegNum != AArch64::XZR) {
3250 getLexer().UnLex(RegTok);
3257 if (!tryParseScalarRegister(RegNum).isSuccess())
3258 return TokError(
"expected register operand");
3260 if (RegNum != AArch64::XZR)
3261 return TokError(
"xzr must be followed by xzr");
3265 Operands.push_back(AArch64Operand::CreateReg(
3266 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
// BTI hint: identifier must resolve via lookupBTIByName.
3276 return TokError(
"invalid operand for instruction");
3278 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
3280 return TokError(
"invalid operand for instruction");
3282 Operands.push_back(AArch64Operand::CreateBTIHint(
3283 BTI->Encoding, Tok.
getString(), S, getContext()));
// NOTE(review): Damaged extraction covering four parser fragments: ADRP
// label parsing (classifies the symbol ref and rejects gotpage refs with
// addends and non-page refs), ADR label parsing, FP-immediate parsing
// (#N encoded form and decimal form, with the "#0.0" literal special case),
// and shifted-immediate parsing ("imm, lsl #N"). Interior lines were
// dropped by extraction. Code left byte-identical; comments only.
3292 const MCExpr *Expr =
nullptr;
3298 if (parseSymbolicImmVal(Expr))
3304 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3314 return Error(S,
"gotpage label reference not allowed an addend");
3326 return Error(S,
"page or gotpage label reference expected");
3334 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
// ADR label: any symbol classification other than a plain ref is rejected.
3343 const MCExpr *Expr =
nullptr;
3352 if (parseSymbolicImmVal(Expr))
3358 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3371 return Error(S,
"unexpected adr label");
3376 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
// FP immediate: integer form must be an unsigned 8-bit encoded value; the
// decimal form goes through APFloat, with sign applied manually and exact
// conversions tracked via the convertFromString status.
3381template <
bool AddFPZeroAsLiteral>
3394 return TokError(
"invalid floating point immediate");
3399 if (Tok.
getIntVal() > 255 || isNegative)
3400 return TokError(
"encoded floating point value out of range");
3404 AArch64Operand::CreateFPImm(
F,
true, S, getContext()));
3407 APFloat RealVal(APFloat::IEEEdouble());
3409 RealVal.convertFromString(Tok.
getString(), APFloat::rmTowardZero);
3411 return TokError(
"invalid floating point representation");
3414 RealVal.changeSign();
// +0.0 may be emitted as the literal tokens "#0" ".0" when requested.
3416 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3417 Operands.push_back(AArch64Operand::CreateToken(
"#0", S, getContext()));
3418 Operands.push_back(AArch64Operand::CreateToken(
".0", S, getContext()));
3420 Operands.push_back(AArch64Operand::CreateFPImm(
3421 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
// Shifted immediate: only "lsl #+N" is accepted after the immediate; a
// zero shift collapses back to a plain immediate operand.
3446 if (parseSymbolicImmVal(Imm))
3450 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3457 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
3459 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3461 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3467 !getTok().getIdentifier().equals_insensitive(
"lsl"))
3468 return Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3476 return Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3478 int64_t ShiftAmount = getTok().getIntVal();
3480 if (ShiftAmount < 0)
3481 return Error(getLoc(),
"positive shift amount required");
3485 if (ShiftAmount == 0 && Imm !=
nullptr) {
3487 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3491 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3492 getLoc(), getContext()));
3499AArch64AsmParser::parseCondCodeString(
StringRef Cond, std::string &Suggestion) {
3536 Suggestion =
"nfrst";
3543 bool invertCondCode) {
3549 std::string Suggestion;
3552 std::string Msg =
"invalid condition code";
3553 if (!Suggestion.empty())
3554 Msg +=
", did you mean " + Suggestion +
"?";
3555 return TokError(Msg);
3559 if (invertCondCode) {
3561 return TokError(
"condition codes AL and NV are invalid for this instruction");
3566 AArch64Operand::CreateCondCode(
CC, S, getLoc(), getContext()));
3575 return TokError(
"invalid operand for instruction");
3577 unsigned PStateImm = -1;
3578 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.
getString());
3581 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3582 PStateImm = SVCR->Encoding;
3585 AArch64Operand::CreateSVCR(PStateImm, Tok.
getString(), S, getContext()));
3596 if (
Name.equals_insensitive(
"za") ||
Name.starts_with_insensitive(
"za.")) {
3598 unsigned ElementWidth = 0;
3599 auto DotPosition =
Name.find(
'.');
3601 const auto &KindRes =
3605 "Expected the register to be followed by element width suffix");
3606 ElementWidth = KindRes->second;
3608 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3609 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3614 if (parseOperand(
Operands,
false,
false))
3621 unsigned Reg = matchRegisterNameAlias(
Name, RegKind::Matrix);
3625 size_t DotPosition =
Name.find(
'.');
3633 .
Case(
"h", MatrixKind::Row)
3634 .
Case(
"v", MatrixKind::Col)
3641 "Expected the register to be followed by element width suffix");
3642 unsigned ElementWidth = KindRes->second;
3646 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3647 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3652 if (parseOperand(
Operands,
false,
false))
3694 return TokError(
"expected #imm after shift specifier");
3700 AArch64Operand::CreateShiftExtend(ShOp, 0,
false, S,
E, getContext()));
3709 return Error(
E,
"expected integer shift amount");
3712 if (getParser().parseExpression(ImmVal))
3717 return Error(
E,
"expected constant '#imm' after shift specifier");
3720 Operands.push_back(AArch64Operand::CreateShiftExtend(
3721 ShOp, MCE->
getValue(),
true, S,
E, getContext()));
3729 {
"crc", {AArch64::FeatureCRC}},
3730 {
"sm4", {AArch64::FeatureSM4}},
3731 {
"sha3", {AArch64::FeatureSHA3}},
3732 {
"sha2", {AArch64::FeatureSHA2}},
3733 {
"aes", {AArch64::FeatureAES}},
3734 {
"crypto", {AArch64::FeatureCrypto}},
3735 {
"fp", {AArch64::FeatureFPARMv8}},
3736 {
"simd", {AArch64::FeatureNEON}},
3737 {
"ras", {AArch64::FeatureRAS}},
3738 {
"rasv2", {AArch64::FeatureRASv2}},
3739 {
"lse", {AArch64::FeatureLSE}},
3740 {
"predres", {AArch64::FeaturePredRes}},
3741 {
"predres2", {AArch64::FeatureSPECRES2}},
3742 {
"ccdp", {AArch64::FeatureCacheDeepPersist}},
3743 {
"mte", {AArch64::FeatureMTE}},
3744 {
"memtag", {AArch64::FeatureMTE}},
3745 {
"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3746 {
"pan", {AArch64::FeaturePAN}},
3747 {
"pan-rwv", {AArch64::FeaturePAN_RWV}},
3748 {
"ccpp", {AArch64::FeatureCCPP}},
3749 {
"rcpc", {AArch64::FeatureRCPC}},
3750 {
"rng", {AArch64::FeatureRandGen}},
3751 {
"sve", {AArch64::FeatureSVE}},
3752 {
"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3753 {
"sve2", {AArch64::FeatureSVE2}},
3754 {
"sve-aes", {AArch64::FeatureSVEAES}},
3755 {
"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3756 {
"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3757 {
"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3758 {
"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3759 {
"sve2p1", {AArch64::FeatureSVE2p1}},
3760 {
"ls64", {AArch64::FeatureLS64}},
3761 {
"xs", {AArch64::FeatureXS}},
3762 {
"pauth", {AArch64::FeaturePAuth}},
3763 {
"flagm", {AArch64::FeatureFlagM}},
3764 {
"rme", {AArch64::FeatureRME}},
3765 {
"sme", {AArch64::FeatureSME}},
3766 {
"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3767 {
"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3768 {
"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3769 {
"sme2", {AArch64::FeatureSME2}},
3770 {
"sme2p1", {AArch64::FeatureSME2p1}},
3771 {
"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3772 {
"hbc", {AArch64::FeatureHBC}},
3773 {
"mops", {AArch64::FeatureMOPS}},
3774 {
"mec", {AArch64::FeatureMEC}},
3775 {
"the", {AArch64::FeatureTHE}},
3776 {
"d128", {AArch64::FeatureD128}},
3777 {
"lse128", {AArch64::FeatureLSE128}},
3778 {
"ite", {AArch64::FeatureITE}},
3779 {
"cssc", {AArch64::FeatureCSSC}},
3780 {
"rcpc3", {AArch64::FeatureRCPC3}},
3781 {
"gcs", {AArch64::FeatureGCS}},
3782 {
"bf16", {AArch64::FeatureBF16}},
3783 {
"compnum", {AArch64::FeatureComplxNum}},
3784 {
"dotprod", {AArch64::FeatureDotProd}},
3785 {
"f32mm", {AArch64::FeatureMatMulFP32}},
3786 {
"f64mm", {AArch64::FeatureMatMulFP64}},
3787 {
"fp16", {AArch64::FeatureFullFP16}},
3788 {
"fp16fml", {AArch64::FeatureFP16FML}},
3789 {
"i8mm", {AArch64::FeatureMatMulInt8}},
3790 {
"lor", {AArch64::FeatureLOR}},
3791 {
"profile", {AArch64::FeatureSPE}},
3795 {
"rdm", {AArch64::FeatureRDM}},
3796 {
"rdma", {AArch64::FeatureRDM}},
3797 {
"sb", {AArch64::FeatureSB}},
3798 {
"ssbs", {AArch64::FeatureSSBS}},
3799 {
"tme", {AArch64::FeatureTME}},
3800 {
"fp8", {AArch64::FeatureFP8}},
3801 {
"faminmax", {AArch64::FeatureFAMINMAX}},
3802 {
"fp8fma", {AArch64::FeatureFP8FMA}},
3803 {
"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3804 {
"fp8dot2", {AArch64::FeatureFP8DOT2}},
3805 {
"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3806 {
"fp8dot4", {AArch64::FeatureFP8DOT4}},
3807 {
"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3808 {
"lut", {AArch64::FeatureLUT}},
3809 {
"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3810 {
"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3811 {
"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3812 {
"sme-fa64", {AArch64::FeatureSMEFA64}},
3813 {
"cpa", {AArch64::FeatureCPA}},
3814 {
"tlbiw", {AArch64::FeatureTLBIW}},
3815 {
"pops", {AArch64::FeaturePoPS}},
3816 {
"cmpbr", {AArch64::FeatureCMPBR}},
3817 {
"f8f32mm", {AArch64::FeatureF8F32MM}},
3818 {
"f8f16mm", {AArch64::FeatureF8F16MM}},
3819 {
"fprcvt", {AArch64::FeatureFPRCVT}},
3820 {
"lsfe", {AArch64::FeatureLSFE}},
3821 {
"sme2p2", {AArch64::FeatureSME2p2}},
3822 {
"ssve-aes", {AArch64::FeatureSSVE_AES}},
3823 {
"sve2p2", {AArch64::FeatureSVE2p2}},
3824 {
"sve-aes2", {AArch64::FeatureSVEAES2}},
3825 {
"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3826 {
"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3827 {
"lsui", {AArch64::FeatureLSUI}},
3828 {
"occmo", {AArch64::FeatureOCCMO}},
3829 {
"pcdphint", {AArch64::FeaturePCDPHINT}},
3833 if (FBS[AArch64::HasV8_0aOps])
3835 if (FBS[AArch64::HasV8_1aOps])
3837 else if (FBS[AArch64::HasV8_2aOps])
3839 else if (FBS[AArch64::HasV8_3aOps])
3841 else if (FBS[AArch64::HasV8_4aOps])
3843 else if (FBS[AArch64::HasV8_5aOps])
3845 else if (FBS[AArch64::HasV8_6aOps])
3847 else if (FBS[AArch64::HasV8_7aOps])
3849 else if (FBS[AArch64::HasV8_8aOps])
3851 else if (FBS[AArch64::HasV8_9aOps])
3853 else if (FBS[AArch64::HasV9_0aOps])
3855 else if (FBS[AArch64::HasV9_1aOps])
3857 else if (FBS[AArch64::HasV9_2aOps])
3859 else if (FBS[AArch64::HasV9_3aOps])
3861 else if (FBS[AArch64::HasV9_4aOps])
3863 else if (FBS[AArch64::HasV9_5aOps])
3865 else if (FBS[AArch64::HasV9_6aOps])
3867 else if (FBS[AArch64::HasV8_0rOps])
3876 Str += !ExtMatches.
empty() ? llvm::join(ExtMatches,
", ") :
"(unknown)";
3883 const uint16_t Cm = (Encoding & 0x78) >> 3;
3884 const uint16_t Cn = (Encoding & 0x780) >> 7;
3885 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3890 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3892 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3894 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3897 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3904 if (
Name.contains(
'.'))
3905 return TokError(
"invalid operand");
3908 Operands.push_back(AArch64Operand::CreateToken(
"sys", NameLoc, getContext()));
3914 if (Mnemonic ==
"ic") {
3917 return TokError(
"invalid operand for IC instruction");
3918 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
3919 std::string Str(
"IC " + std::string(IC->
Name) +
" requires: ");
3921 return TokError(Str);
3924 }
else if (Mnemonic ==
"dc") {
3927 return TokError(
"invalid operand for DC instruction");
3928 else if (!DC->
haveFeatures(getSTI().getFeatureBits())) {
3929 std::string Str(
"DC " + std::string(DC->
Name) +
" requires: ");
3931 return TokError(Str);
3934 }
else if (Mnemonic ==
"at") {
3937 return TokError(
"invalid operand for AT instruction");
3938 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
3939 std::string Str(
"AT " + std::string(AT->
Name) +
" requires: ");
3941 return TokError(Str);
3944 }
else if (Mnemonic ==
"tlbi") {
3947 return TokError(
"invalid operand for TLBI instruction");
3948 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
3949 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires: ");
3951 return TokError(Str);
3954 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp" || Mnemonic ==
"cosp") {
3956 if (
Op.lower() !=
"rctx")
3957 return TokError(
"invalid operand for prediction restriction instruction");
3959 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3960 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3961 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3963 if (Mnemonic ==
"cosp" && !hasSpecres2)
3964 return TokError(
"COSP requires: predres2");
3966 return TokError(Mnemonic.
upper() +
"RCTX requires: predres");
3968 uint16_t PRCTX_Op2 = Mnemonic ==
"cfp" ? 0b100
3969 : Mnemonic ==
"dvp" ? 0b101
3970 : Mnemonic ==
"cosp" ? 0b110
3971 : Mnemonic ==
"cpp" ? 0b111
3974 "Invalid mnemonic for prediction restriction instruction");
3975 const auto SYS_3_7_3 = 0b01101110011;
3976 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3978 createSysAlias(Encoding,
Operands, S);
3983 bool ExpectRegister = !
Op.contains_insensitive(
"all");
3984 bool HasRegister =
false;
3989 return TokError(
"expected register operand");
3993 if (ExpectRegister && !HasRegister)
3994 return TokError(
"specified " + Mnemonic +
" op requires a register");
3995 else if (!ExpectRegister && HasRegister)
3996 return TokError(
"specified " + Mnemonic +
" op does not use a register");
4008 if (
Name.contains(
'.'))
4009 return TokError(
"invalid operand");
4013 AArch64Operand::CreateToken(
"sysp", NameLoc, getContext()));
4019 if (Mnemonic ==
"tlbip") {
4020 bool HasnXSQualifier =
Op.ends_with_insensitive(
"nXS");
4021 if (HasnXSQualifier) {
4022 Op =
Op.drop_back(3);
4026 return TokError(
"invalid operand for TLBIP instruction");
4028 TLBIorig->
Name, TLBIorig->
Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4035 std::string(TLBI.
Name) + (HasnXSQualifier ?
"nXS" :
"");
4036 std::string Str(
"TLBIP " +
Name +
" requires: ");
4038 return TokError(Str);
4049 return TokError(
"expected register identifier");
4054 return TokError(
"specified " + Mnemonic +
4055 " op requires a pair of registers");
4068 return TokError(
"'csync' operand expected");
4072 SMLoc ExprLoc = getLoc();
4074 if (getParser().parseExpression(ImmVal))
4078 return Error(ExprLoc,
"immediate value expected for barrier operand");
4080 if (Mnemonic ==
"dsb" &&
Value > 15) {
4087 if (Value < 0 || Value > 15)
4088 return Error(ExprLoc,
"barrier operand out of range");
4089 auto DB = AArch64DB::lookupDBByEncoding(
Value);
4090 Operands.push_back(AArch64Operand::CreateBarrier(
Value, DB ?
DB->Name :
"",
4091 ExprLoc, getContext(),
4097 return TokError(
"invalid operand for instruction");
4100 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4101 auto DB = AArch64DB::lookupDBByName(Operand);
4103 if (Mnemonic ==
"isb" && (!DB ||
DB->Encoding != AArch64DB::sy))
4104 return TokError(
"'sy' or #imm operand expected");
4106 if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4107 return TokError(
"'csync' operand expected");
4109 if (Mnemonic ==
"dsb") {
4114 return TokError(
"invalid barrier option name");
4117 Operands.push_back(AArch64Operand::CreateBarrier(
4118 DB ?
DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(),
4119 getContext(),
false ));
4129 assert(Mnemonic ==
"dsb" &&
"Instruction does not accept nXS operands");
4130 if (Mnemonic !=
"dsb")
4136 SMLoc ExprLoc = getLoc();
4137 if (getParser().parseExpression(ImmVal))
4141 return Error(ExprLoc,
"immediate value expected for barrier operand");
4146 return Error(ExprLoc,
"barrier operand out of range");
4147 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(
Value);
4148 Operands.push_back(AArch64Operand::CreateBarrier(
DB->Encoding,
DB->Name,
4149 ExprLoc, getContext(),
4155 return TokError(
"invalid operand for instruction");
4158 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4161 return TokError(
"invalid barrier option name");
4164 AArch64Operand::CreateBarrier(
DB->Encoding, Tok.
getString(), getLoc(),
4165 getContext(),
true ));
4177 if (AArch64SVCR::lookupSVCRByName(Tok.
getString()))
4182 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4183 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4184 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4188 unsigned PStateImm = -1;
4189 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.
getString());
4190 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4191 PStateImm = PState15->Encoding;
4193 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.
getString());
4194 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4195 PStateImm = PState1->Encoding;
4199 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
4200 PStateImm, getContext()));
4211 return TokError(
"invalid operand for instruction");
4215 return TokError(
"invalid operand for instruction");
4217 Operands.push_back(AArch64Operand::CreatePHintInst(
4218 PH->Encoding, Tok.
getString(), S, getContext()));
4232 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4240 unsigned ElementWidth = KindRes->second;
4242 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4243 S, getLoc(), getContext()));
4248 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4250 return tryParseVectorIndex(
Operands).isFailure();
4254 SMLoc SIdx = getLoc();
4257 if (getParser().parseExpression(ImmVal))
4261 return TokError(
"immediate value expected for vector index");
4268 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->
getValue(), SIdx,
4281 RegKind MatchKind) {
4290 size_t Start = 0, Next =
Name.find(
'.');
4292 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4298 return TokError(
"invalid vector kind qualifier");
4309ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4312 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(
Operands);
4314 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(
Operands);
4319template <RegKind RK>
4323 const SMLoc S = getLoc();
4326 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4334 unsigned ElementWidth = KindRes->second;
4335 Operands.push_back(AArch64Operand::CreateVectorReg(
4336 RegNum, RK, ElementWidth, S,
4337 getLoc(), getContext()));
4340 if (RK == RegKind::SVEPredicateAsCounter) {
4347 if (parseOperand(
Operands,
false,
false))
4358 return Error(S,
"not expecting size suffix");
4361 Operands.push_back(AArch64Operand::CreateToken(
"/", getLoc(), getContext()));
4366 auto Pred = getTok().getString().lower();
4367 if (RK == RegKind::SVEPredicateAsCounter && Pred !=
"z")
4368 return Error(getLoc(),
"expecting 'z' predication");
4370 if (RK == RegKind::SVEPredicateVector && Pred !=
"z" && Pred !=
"m")
4371 return Error(getLoc(),
"expecting 'm' or 'z' predication");
4374 const char *ZM = Pred ==
"z" ?
"z" :
"m";
4375 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4384 if (!tryParseNeonVectorRegister(
Operands))
4387 if (tryParseZTOperand(
Operands).isSuccess())
4391 if (tryParseGPROperand<false>(
Operands).isSuccess())
4397bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
4398 bool HasELFModifier =
false;
4402 HasELFModifier =
true;
4405 return TokError(
"expect relocation specifier in operand after ':'");
4407 std::string LowerCase = getTok().getIdentifier().lower();
4463 return TokError(
"expect relocation specifier in operand after ':'");
4467 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
4471 if (getParser().parseExpression(ImmVal))
4484 auto ParseMatrixTile = [
this](
unsigned &
Reg,
4487 size_t DotPosition =
Name.find(
'.');
4496 const std::optional<std::pair<int, int>> &KindRes =
4500 "Expected the register to be followed by element width suffix");
4501 ElementWidth = KindRes->second;
4508 auto LCurly = getTok();
4513 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4514 0, S, getLoc(), getContext()));
4519 if (getTok().getString().equals_insensitive(
"za")) {
4525 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4526 0xFF, S, getLoc(), getContext()));
4530 SMLoc TileLoc = getLoc();
4532 unsigned FirstReg, ElementWidth;
4533 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4534 if (!ParseRes.isSuccess()) {
4535 getLexer().UnLex(LCurly);
4541 unsigned PrevReg = FirstReg;
4544 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4547 SeenRegs.
insert(FirstReg);
4551 unsigned Reg, NextElementWidth;
4552 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4553 if (!ParseRes.isSuccess())
4557 if (ElementWidth != NextElementWidth)
4558 return Error(TileLoc,
"mismatched register size suffix");
4561 Warning(TileLoc,
"tile list not in ascending order");
4564 Warning(TileLoc,
"duplicate tile in list");
4567 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4576 unsigned RegMask = 0;
4577 for (
auto Reg : DRegs)
4581 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4586template <RegKind VectorKind>
4596 auto RegTok = getTok();
4597 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4598 if (ParseRes.isSuccess()) {
4605 RegTok.getString().equals_insensitive(
"zt0"))
4609 (ParseRes.isNoMatch() && NoMatchIsError &&
4610 !RegTok.getString().starts_with_insensitive(
"za")))
4611 return Error(Loc,
"vector register expected");
4616 int NumRegs = getNumRegsForRegKind(VectorKind);
4618 auto LCurly = getTok();
4623 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4627 if (ParseRes.isNoMatch())
4630 if (!ParseRes.isSuccess())
4633 int64_t PrevReg = FirstReg;
4638 SMLoc Loc = getLoc();
4642 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4643 if (!ParseRes.isSuccess())
4647 if (Kind != NextKind)
4648 return Error(Loc,
"mismatched register size suffix");
4651 (PrevReg <
Reg) ? (Reg - PrevReg) : (
Reg + NumRegs - PrevReg);
4653 if (Space == 0 || Space > 3)
4654 return Error(Loc,
"invalid number of vectors");
4659 bool HasCalculatedStride =
false;
4661 SMLoc Loc = getLoc();
4664 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4665 if (!ParseRes.isSuccess())
4669 if (Kind != NextKind)
4670 return Error(Loc,
"mismatched register size suffix");
4672 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4673 unsigned PrevRegVal =
4674 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4675 if (!HasCalculatedStride) {
4676 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4677 : (RegVal + NumRegs - PrevRegVal);
4678 HasCalculatedStride =
true;
4682 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4683 return Error(Loc,
"registers must have the same sequential stride");
4694 return Error(S,
"invalid number of vectors");
4696 unsigned NumElements = 0;
4697 unsigned ElementWidth = 0;
4698 if (!
Kind.empty()) {
4700 std::tie(NumElements, ElementWidth) = *VK;
4703 Operands.push_back(AArch64Operand::CreateVectorList(
4704 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4705 getLoc(), getContext()));
4712 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(
Operands,
true);
4713 if (!ParseRes.isSuccess())
4716 return tryParseVectorIndex(
Operands).isFailure();
4720 SMLoc StartLoc = getLoc();
4728 Operands.push_back(AArch64Operand::CreateReg(
4729 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4736 return Error(getLoc(),
"index must be absent or #0");
4739 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4740 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4741 return Error(getLoc(),
"index must be absent or #0");
4743 Operands.push_back(AArch64Operand::CreateReg(
4744 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4749 SMLoc StartLoc = getLoc();
4753 unsigned RegNum = matchRegisterNameAlias(
Name, RegKind::LookupTable);
4758 Operands.push_back(AArch64Operand::CreateReg(
4759 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4765 AArch64Operand::CreateToken(
"[", getLoc(), getContext()));
4767 if (getParser().parseExpression(ImmVal))
4771 return TokError(
"immediate value expected for vector index");
4772 Operands.push_back(AArch64Operand::CreateImm(
4774 getLoc(), getContext()));
4776 if (parseOptionalMulOperand(
Operands))
4781 AArch64Operand::CreateToken(
"]", getLoc(), getContext()));
4786template <
bool ParseShiftExtend, RegConstra
intEqualityTy EqTy>
4788 SMLoc StartLoc = getLoc();
4797 Operands.push_back(AArch64Operand::CreateReg(
4798 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4807 Res = tryParseOptionalShiftExtend(ExtOpnd);
4811 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.
back().get());
4812 Operands.push_back(AArch64Operand::CreateReg(
4813 RegNum, RegKind::Scalar, StartLoc,
Ext->getEndLoc(), getContext(), EqTy,
4814 Ext->getShiftExtendType(),
Ext->getShiftExtendAmount(),
4815 Ext->hasShiftExtendAmount()));
4829 if (!getTok().getString().equals_insensitive(
"mul") ||
4830 !(NextIsVL || NextIsHash))
4834 AArch64Operand::CreateToken(
"mul", getLoc(), getContext()));
4839 AArch64Operand::CreateToken(
"vl", getLoc(), getContext()));
4851 if (
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4852 Operands.push_back(AArch64Operand::CreateImm(
4859 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
4865 auto Tok = Parser.
getTok();
4870 .
Case(
"vgx2",
"vgx2")
4871 .
Case(
"vgx4",
"vgx4")
4883 auto Tok = getTok();
4893 AArch64Operand::CreateToken(Keyword, Tok.
getLoc(), getContext()));