71 SVEPredicateAsCounter,
// Kinds of matrix operand accepted by the parser.
enum class MatrixKind {
  Array,
  Tile,
  Row,
  Col
};
79enum RegConstraintEqualityTy {
97 case AArch64::MOVPRFX_ZZ:
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
109 "No destructive element size set for movprfx");
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
121 "No destructive element size set for movprfx");
132 PrefixInfo() =
default;
133 bool isActive()
const {
return Active; }
135 unsigned getElementSize()
const {
139 unsigned getDstReg()
const {
return Dst; }
140 unsigned getPgReg()
const {
147 bool Predicated =
false;
148 unsigned ElementSize;
164 std::string &Suggestion);
166 unsigned matchRegisterNameAlias(
StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(
const MCExpr *&ImmVal);
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
177 bool parseRegisterInRange(
unsigned &Out,
unsigned Base,
unsigned First,
183 bool parseDirectiveArch(
SMLoc L);
184 bool parseDirectiveArchExtension(
SMLoc L);
185 bool parseDirectiveCPU(
SMLoc L);
186 bool parseDirectiveInst(
SMLoc L);
188 bool parseDirectiveTLSDescCall(
SMLoc L);
191 bool parseDirectiveLtorg(
SMLoc L);
194 bool parseDirectiveUnreq(
SMLoc L);
195 bool parseDirectiveCFINegateRAState();
196 bool parseDirectiveCFIBKeyFrame();
197 bool parseDirectiveCFIMTETaggedFrame();
199 bool parseDirectiveVariantPCS(
SMLoc L);
201 bool parseDirectiveSEHAllocStack(
SMLoc L);
202 bool parseDirectiveSEHPrologEnd(
SMLoc L);
203 bool parseDirectiveSEHSaveR19R20X(
SMLoc L);
204 bool parseDirectiveSEHSaveFPLR(
SMLoc L);
205 bool parseDirectiveSEHSaveFPLRX(
SMLoc L);
206 bool parseDirectiveSEHSaveReg(
SMLoc L);
207 bool parseDirectiveSEHSaveRegX(
SMLoc L);
208 bool parseDirectiveSEHSaveRegP(
SMLoc L);
209 bool parseDirectiveSEHSaveRegPX(
SMLoc L);
210 bool parseDirectiveSEHSaveLRPair(
SMLoc L);
211 bool parseDirectiveSEHSaveFReg(
SMLoc L);
212 bool parseDirectiveSEHSaveFRegX(
SMLoc L);
213 bool parseDirectiveSEHSaveFRegP(
SMLoc L);
214 bool parseDirectiveSEHSaveFRegPX(
SMLoc L);
215 bool parseDirectiveSEHSetFP(
SMLoc L);
216 bool parseDirectiveSEHAddFP(
SMLoc L);
217 bool parseDirectiveSEHNop(
SMLoc L);
218 bool parseDirectiveSEHSaveNext(
SMLoc L);
219 bool parseDirectiveSEHEpilogStart(
SMLoc L);
220 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
221 bool parseDirectiveSEHTrapFrame(
SMLoc L);
222 bool parseDirectiveSEHMachineFrame(
SMLoc L);
223 bool parseDirectiveSEHContext(
SMLoc L);
224 bool parseDirectiveSEHClearUnwoundToCall(
SMLoc L);
225 bool parseDirectiveSEHPACSignLR(
SMLoc L);
226 bool parseDirectiveSEHSaveAnyReg(
SMLoc L,
bool Paired,
bool Writeback);
228 bool validateInstruction(
MCInst &Inst,
SMLoc &IDLoc,
230 unsigned getNumRegsForRegKind(RegKind K);
234 bool MatchingInlineAsm)
override;
238#define GET_ASSEMBLER_HEADER
239#include "AArch64GenAsmMatcher.inc"
254 template <
bool IsSVEPrefetch = false>
261 template<
bool AddFPZeroAsLiteral>
269 template <
bool ParseShiftExtend,
270 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
273 template <
bool ParseShiftExtend,
bool ParseSuffix>
275 template <RegKind RK>
277 template <RegKind VectorKind>
279 bool ExpectMatch =
false);
287 enum AArch64MatchResultTy {
289#define GET_OPERAND_DIAGNOSTIC_TYPES
290#include "AArch64GenAsmMatcher.inc"
321 SMLoc &EndLoc)
override;
323 SMLoc &EndLoc)
override;
326 unsigned Kind)
override;
328 static bool classifySymbolRef(
const MCExpr *Expr,
360 SMLoc StartLoc, EndLoc;
369 struct ShiftExtendOp {
372 bool HasExplicitAmount;
382 RegConstraintEqualityTy EqualityTy;
398 ShiftExtendOp ShiftExtend;
403 unsigned ElementWidth;
407 struct MatrixTileListOp {
408 unsigned RegMask = 0;
411 struct VectorListOp {
415 unsigned NumElements;
416 unsigned ElementWidth;
417 RegKind RegisterKind;
420 struct VectorIndexOp {
428 struct ShiftedImmOp {
430 unsigned ShiftAmount;
487 unsigned PStateField;
493 struct MatrixRegOp MatrixReg;
494 struct MatrixTileListOp MatrixTileList;
495 struct VectorListOp VectorList;
496 struct VectorIndexOp VectorIndex;
498 struct ShiftedImmOp ShiftedImm;
499 struct ImmRangeOp ImmRange;
501 struct FPImmOp FPImm;
503 struct SysRegOp SysReg;
504 struct SysCRImmOp SysCRImm;
506 struct PSBHintOp PSBHint;
507 struct BTIHintOp BTIHint;
508 struct ShiftExtendOp ShiftExtend;
521 StartLoc =
o.StartLoc;
531 ShiftedImm =
o.ShiftedImm;
534 ImmRange =
o.ImmRange;
548 case k_MatrixRegister:
549 MatrixReg =
o.MatrixReg;
551 case k_MatrixTileList:
552 MatrixTileList =
o.MatrixTileList;
555 VectorList =
o.VectorList;
558 VectorIndex =
o.VectorIndex;
564 SysCRImm =
o.SysCRImm;
576 ShiftExtend =
o.ShiftExtend;
585 SMLoc getStartLoc()
const override {
return StartLoc; }
587 SMLoc getEndLoc()
const override {
return EndLoc; }
590 assert(Kind == k_Token &&
"Invalid access!");
594 bool isTokenSuffix()
const {
595 assert(Kind == k_Token &&
"Invalid access!");
599 const MCExpr *getImm()
const {
600 assert(Kind == k_Immediate &&
"Invalid access!");
604 const MCExpr *getShiftedImmVal()
const {
605 assert(Kind == k_ShiftedImm &&
"Invalid access!");
606 return ShiftedImm.Val;
609 unsigned getShiftedImmShift()
const {
610 assert(Kind == k_ShiftedImm &&
"Invalid access!");
611 return ShiftedImm.ShiftAmount;
614 unsigned getFirstImmVal()
const {
615 assert(Kind == k_ImmRange &&
"Invalid access!");
616 return ImmRange.First;
619 unsigned getLastImmVal()
const {
620 assert(Kind == k_ImmRange &&
"Invalid access!");
621 return ImmRange.Last;
625 assert(Kind == k_CondCode &&
"Invalid access!");
630 assert (Kind == k_FPImm &&
"Invalid access!");
631 return APFloat(APFloat::IEEEdouble(),
APInt(64, FPImm.Val,
true));
634 bool getFPImmIsExact()
const {
635 assert (Kind == k_FPImm &&
"Invalid access!");
636 return FPImm.IsExact;
639 unsigned getBarrier()
const {
640 assert(Kind == k_Barrier &&
"Invalid access!");
645 assert(Kind == k_Barrier &&
"Invalid access!");
649 bool getBarriernXSModifier()
const {
650 assert(Kind == k_Barrier &&
"Invalid access!");
654 unsigned getReg()
const override {
655 assert(Kind == k_Register &&
"Invalid access!");
659 unsigned getMatrixReg()
const {
660 assert(Kind == k_MatrixRegister &&
"Invalid access!");
661 return MatrixReg.RegNum;
664 unsigned getMatrixElementWidth()
const {
665 assert(Kind == k_MatrixRegister &&
"Invalid access!");
666 return MatrixReg.ElementWidth;
669 MatrixKind getMatrixKind()
const {
670 assert(Kind == k_MatrixRegister &&
"Invalid access!");
671 return MatrixReg.Kind;
674 unsigned getMatrixTileListRegMask()
const {
675 assert(isMatrixTileList() &&
"Invalid access!");
676 return MatrixTileList.RegMask;
679 RegConstraintEqualityTy getRegEqualityTy()
const {
680 assert(Kind == k_Register &&
"Invalid access!");
681 return Reg.EqualityTy;
684 unsigned getVectorListStart()
const {
685 assert(Kind == k_VectorList &&
"Invalid access!");
686 return VectorList.RegNum;
689 unsigned getVectorListCount()
const {
690 assert(Kind == k_VectorList &&
"Invalid access!");
691 return VectorList.Count;
694 unsigned getVectorListStride()
const {
695 assert(Kind == k_VectorList &&
"Invalid access!");
696 return VectorList.Stride;
699 int getVectorIndex()
const {
700 assert(Kind == k_VectorIndex &&
"Invalid access!");
701 return VectorIndex.Val;
705 assert(Kind == k_SysReg &&
"Invalid access!");
706 return StringRef(SysReg.Data, SysReg.Length);
709 unsigned getSysCR()
const {
710 assert(Kind == k_SysCR &&
"Invalid access!");
714 unsigned getPrefetch()
const {
715 assert(Kind == k_Prefetch &&
"Invalid access!");
719 unsigned getPSBHint()
const {
720 assert(Kind == k_PSBHint &&
"Invalid access!");
725 assert(Kind == k_PSBHint &&
"Invalid access!");
726 return StringRef(PSBHint.Data, PSBHint.Length);
729 unsigned getBTIHint()
const {
730 assert(Kind == k_BTIHint &&
"Invalid access!");
735 assert(Kind == k_BTIHint &&
"Invalid access!");
736 return StringRef(BTIHint.Data, BTIHint.Length);
740 assert(Kind == k_SVCR &&
"Invalid access!");
741 return StringRef(SVCR.Data, SVCR.Length);
745 assert(Kind == k_Prefetch &&
"Invalid access!");
750 if (Kind == k_ShiftExtend)
751 return ShiftExtend.Type;
752 if (Kind == k_Register)
753 return Reg.ShiftExtend.Type;
757 unsigned getShiftExtendAmount()
const {
758 if (Kind == k_ShiftExtend)
759 return ShiftExtend.Amount;
760 if (Kind == k_Register)
761 return Reg.ShiftExtend.Amount;
765 bool hasShiftExtendAmount()
const {
766 if (Kind == k_ShiftExtend)
767 return ShiftExtend.HasExplicitAmount;
768 if (Kind == k_Register)
769 return Reg.ShiftExtend.HasExplicitAmount;
773 bool isImm()
const override {
return Kind == k_Immediate; }
774 bool isMem()
const override {
return false; }
776 bool isUImm6()
const {
783 return (Val >= 0 && Val < 64);
786 template <
int W
idth>
bool isSImm()
const {
return isSImmScaled<Width, 1>(); }
789 return isImmScaled<Bits, Scale>(
true);
792 template <
int Bits,
int Scale,
int Offset = 0,
bool IsRange = false>
794 if (IsRange && isImmRange() &&
795 (getLastImmVal() != getFirstImmVal() +
Offset))
796 return DiagnosticPredicateTy::NoMatch;
798 return isImmScaled<Bits, Scale, IsRange>(
false);
801 template <
int Bits,
int Scale,
bool IsRange = false>
803 if ((!
isImm() && !isImmRange()) || (
isImm() && IsRange) ||
804 (isImmRange() && !IsRange))
805 return DiagnosticPredicateTy::NoMatch;
809 Val = getFirstImmVal();
813 return DiagnosticPredicateTy::NoMatch;
817 int64_t MinVal, MaxVal;
819 int64_t Shift =
Bits - 1;
820 MinVal = (int64_t(1) << Shift) * -Scale;
821 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
824 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
827 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
828 return DiagnosticPredicateTy::Match;
830 return DiagnosticPredicateTy::NearMatch;
835 return DiagnosticPredicateTy::NoMatch;
836 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
838 return DiagnosticPredicateTy::NoMatch;
840 if (Val >= 0 && Val < 32)
841 return DiagnosticPredicateTy::Match;
842 return DiagnosticPredicateTy::NearMatch;
847 return DiagnosticPredicateTy::NoMatch;
848 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
850 return DiagnosticPredicateTy::NoMatch;
852 if (Val >= 0 && Val <= 1)
853 return DiagnosticPredicateTy::Match;
854 return DiagnosticPredicateTy::NearMatch;
857 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
861 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
893 template <
int Scale>
bool isUImm12Offset()
const {
899 return isSymbolicUImm12Offset(getImm());
902 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
905 template <
int N,
int M>
906 bool isImmInRange()
const {
913 return (Val >=
N && Val <= M);
918 template <
typename T>
919 bool isLogicalImm()
const {
936 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
938 bool isImmRange()
const {
return Kind == k_ImmRange; }
943 template <
unsigned W
idth>
944 std::optional<std::pair<int64_t, unsigned>> getShiftedVal()
const {
945 if (isShiftedImm() && Width == getShiftedImmShift())
946 if (
auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
947 return std::make_pair(
CE->getValue(), Width);
950 if (
auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
951 int64_t Val =
CE->getValue();
953 return std::make_pair(Val >> Width, Width);
955 return std::make_pair(Val, 0u);
961 bool isAddSubImm()
const {
962 if (!isShiftedImm() && !
isImm())
968 if (isShiftedImm()) {
969 unsigned Shift = ShiftedImm.ShiftAmount;
970 Expr = ShiftedImm.Val;
971 if (Shift != 0 && Shift != 12)
980 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
981 DarwinRefKind, Addend)) {
998 if (
auto ShiftedVal = getShiftedVal<12>())
999 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1006 bool isAddSubImmNeg()
const {
1007 if (!isShiftedImm() && !
isImm())
1011 if (
auto ShiftedVal = getShiftedVal<12>())
1012 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1022 template <
typename T>
1024 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1025 return DiagnosticPredicateTy::NoMatch;
1027 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1028 std::is_same<int8_t, T>::value;
1029 if (
auto ShiftedImm = getShiftedVal<8>())
1030 if (!(IsByte && ShiftedImm->second) &&
1031 AArch64_AM::isSVECpyImm<T>(
uint64_t(ShiftedImm->first)
1032 << ShiftedImm->second))
1033 return DiagnosticPredicateTy::Match;
1035 return DiagnosticPredicateTy::NearMatch;
1042 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1043 return DiagnosticPredicateTy::NoMatch;
1045 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1046 std::is_same<int8_t, T>::value;
1047 if (
auto ShiftedImm = getShiftedVal<8>())
1048 if (!(IsByte && ShiftedImm->second) &&
1049 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1050 << ShiftedImm->second))
1051 return DiagnosticPredicateTy::Match;
1053 return DiagnosticPredicateTy::NearMatch;
1057 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1058 return DiagnosticPredicateTy::Match;
1059 return DiagnosticPredicateTy::NoMatch;
1062 bool isCondCode()
const {
return Kind == k_CondCode; }
1064 bool isSIMDImmType10()
const {
1074 bool isBranchTarget()
const {
1083 assert(
N > 0 &&
"Branch target immediate cannot be 0 bits!");
1084 return (Val >= -((1<<(
N-1)) << 2) && Val <= (((1<<(
N-1))-1) << 2));
1095 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1096 DarwinRefKind, Addend)) {
1105 bool isMovWSymbolG3()
const {
1109 bool isMovWSymbolG2()
const {
1110 return isMovWSymbol(
1117 bool isMovWSymbolG1()
const {
1118 return isMovWSymbol(
1126 bool isMovWSymbolG0()
const {
1127 return isMovWSymbol(
1135 template<
int RegW
idth,
int Shift>
1136 bool isMOVZMovAlias()
const {
1137 if (!
isImm())
return false;
1150 template<
int RegW
idth,
int Shift>
1151 bool isMOVNMovAlias()
const {
1152 if (!
isImm())
return false;
1155 if (!CE)
return false;
1161 bool isFPImm()
const {
1162 return Kind == k_FPImm &&
1166 bool isBarrier()
const {
1167 return Kind == k_Barrier && !getBarriernXSModifier();
1169 bool isBarriernXS()
const {
1170 return Kind == k_Barrier && getBarriernXSModifier();
1172 bool isSysReg()
const {
return Kind == k_SysReg; }
1174 bool isMRSSystemRegister()
const {
1175 if (!isSysReg())
return false;
1177 return SysReg.MRSReg != -1U;
1180 bool isMSRSystemRegister()
const {
1181 if (!isSysReg())
return false;
1182 return SysReg.MSRReg != -1U;
1185 bool isSystemPStateFieldWithImm0_1()
const {
1186 if (!isSysReg())
return false;
1187 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1190 bool isSystemPStateFieldWithImm0_15()
const {
1193 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1196 bool isSVCR()
const {
1199 return SVCR.PStateField != -1U;
1202 bool isReg()
const override {
1203 return Kind == k_Register;
1206 bool isVectorList()
const {
return Kind == k_VectorList; }
1208 bool isScalarReg()
const {
1209 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar;
1212 bool isNeonVectorReg()
const {
1213 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1216 bool isNeonVectorRegLo()
const {
1217 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1218 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1220 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1224 bool isMatrix()
const {
return Kind == k_MatrixRegister; }
1225 bool isMatrixTileList()
const {
return Kind == k_MatrixTileList; }
1227 template <
unsigned Class>
bool isSVEPredicateAsCounterReg()
const {
1230 case AArch64::PPRRegClassID:
1231 case AArch64::PPR_3bRegClassID:
1232 case AArch64::PPR_p8to15RegClassID:
1233 RK = RegKind::SVEPredicateAsCounter;
1239 return (Kind == k_Register &&
Reg.Kind == RK) &&
1240 AArch64MCRegisterClasses[
Class].contains(
getReg());
1243 template <
unsigned Class>
bool isSVEVectorReg()
const {
1246 case AArch64::ZPRRegClassID:
1247 case AArch64::ZPR_3bRegClassID:
1248 case AArch64::ZPR_4bRegClassID:
1249 RK = RegKind::SVEDataVector;
1251 case AArch64::PPRRegClassID:
1252 case AArch64::PPR_3bRegClassID:
1253 RK = RegKind::SVEPredicateVector;
1259 return (Kind == k_Register &&
Reg.Kind == RK) &&
1260 AArch64MCRegisterClasses[
Class].contains(
getReg());
1263 template <
unsigned Class>
bool isFPRasZPR()
const {
1264 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1265 AArch64MCRegisterClasses[
Class].contains(
getReg());
1268 template <
int ElementW
idth,
unsigned Class>
1270 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1271 return DiagnosticPredicateTy::NoMatch;
1273 if (isSVEVectorReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1274 return DiagnosticPredicateTy::Match;
1276 return DiagnosticPredicateTy::NearMatch;
1279 template <
int ElementW
idth,
unsigned Class>
1281 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateAsCounter)
1282 return DiagnosticPredicateTy::NoMatch;
1284 if (isSVEPredicateAsCounterReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1285 return DiagnosticPredicateTy::Match;
1287 return DiagnosticPredicateTy::NearMatch;
1290 template <
int ElementW
idth,
unsigned Class>
1292 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1293 return DiagnosticPredicateTy::NoMatch;
1295 if (isSVEVectorReg<Class>() &&
Reg.ElementWidth == ElementWidth)
1296 return DiagnosticPredicateTy::Match;
1298 return DiagnosticPredicateTy::NearMatch;
1301 template <
int ElementWidth,
unsigned Class,
1303 bool ShiftWidthAlwaysSame>
1305 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1306 if (!VectorMatch.isMatch())
1307 return DiagnosticPredicateTy::NoMatch;
1312 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1315 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1316 return DiagnosticPredicateTy::NoMatch;
1318 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1319 return DiagnosticPredicateTy::Match;
1321 return DiagnosticPredicateTy::NearMatch;
1324 bool isGPR32as64()
const {
1325 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1326 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.RegNum);
1329 bool isGPR64as32()
const {
1330 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1331 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.RegNum);
1334 bool isGPR64x8()
const {
1335 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1336 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1340 bool isWSeqPair()
const {
1341 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1342 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1346 bool isXSeqPair()
const {
1347 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1348 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1352 bool isSyspXzrPair()
const {
1353 return isGPR64<AArch64::GPR64RegClassID>() &&
Reg.RegNum == AArch64::XZR;
1356 template<
int64_t Angle,
int64_t Remainder>
1358 if (!
isImm())
return DiagnosticPredicateTy::NoMatch;
1361 if (!CE)
return DiagnosticPredicateTy::NoMatch;
1364 if (
Value % Angle == Remainder &&
Value <= 270)
1365 return DiagnosticPredicateTy::Match;
1366 return DiagnosticPredicateTy::NearMatch;
1369 template <
unsigned RegClassID>
bool isGPR64()
const {
1370 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1371 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
1374 template <
unsigned RegClassID,
int ExtW
idth>
1376 if (Kind != k_Register ||
Reg.Kind != RegKind::Scalar)
1377 return DiagnosticPredicateTy::NoMatch;
1379 if (isGPR64<RegClassID>() && getShiftExtendType() ==
AArch64_AM::LSL &&
1380 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1381 return DiagnosticPredicateTy::Match;
1382 return DiagnosticPredicateTy::NearMatch;
1387 template <RegKind VectorKind,
unsigned NumRegs>
1388 bool isImplicitlyTypedVectorList()
const {
1389 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1390 VectorList.NumElements == 0 &&
1391 VectorList.RegisterKind == VectorKind;
1394 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1395 unsigned ElementWidth,
unsigned Stride = 1>
1396 bool isTypedVectorList()
const {
1397 if (Kind != k_VectorList)
1399 if (VectorList.Count != NumRegs)
1401 if (VectorList.RegisterKind != VectorKind)
1403 if (VectorList.ElementWidth != ElementWidth)
1405 if (VectorList.Stride != Stride)
1407 return VectorList.NumElements == NumElements;
1410 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1411 unsigned ElementWidth>
1414 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1416 return DiagnosticPredicateTy::NoMatch;
1417 if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
1418 return DiagnosticPredicateTy::NearMatch;
1419 return DiagnosticPredicateTy::Match;
1422 template <RegKind VectorKind,
unsigned NumRegs,
unsigned Stride,
1423 unsigned ElementWidth>
1425 bool Res = isTypedVectorList<VectorKind, NumRegs, 0,
1426 ElementWidth, Stride>();
1428 return DiagnosticPredicateTy::NoMatch;
1429 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1430 ((VectorList.RegNum >= AArch64::Z16) &&
1431 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1432 return DiagnosticPredicateTy::Match;
1433 return DiagnosticPredicateTy::NoMatch;
1436 template <
int Min,
int Max>
1438 if (Kind != k_VectorIndex)
1439 return DiagnosticPredicateTy::NoMatch;
1440 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1441 return DiagnosticPredicateTy::Match;
1442 return DiagnosticPredicateTy::NearMatch;
1445 bool isToken()
const override {
return Kind == k_Token; }
1447 bool isTokenEqual(
StringRef Str)
const {
1448 return Kind == k_Token && getToken() == Str;
1450 bool isSysCR()
const {
return Kind == k_SysCR; }
1451 bool isPrefetch()
const {
return Kind == k_Prefetch; }
1452 bool isPSBHint()
const {
return Kind == k_PSBHint; }
1453 bool isBTIHint()
const {
return Kind == k_BTIHint; }
1454 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1455 bool isShifter()
const {
1456 if (!isShiftExtend())
1466 if (Kind != k_FPImm)
1467 return DiagnosticPredicateTy::NoMatch;
1469 if (getFPImmIsExact()) {
1471 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1472 assert(Desc &&
"Unknown enum value");
1475 APFloat RealVal(APFloat::IEEEdouble());
1477 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1478 if (
errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1481 if (
getFPImm().bitwiseIsEqual(RealVal))
1482 return DiagnosticPredicateTy::Match;
1485 return DiagnosticPredicateTy::NearMatch;
1488 template <
unsigned ImmA,
unsigned ImmB>
1491 if ((Res = isExactFPImm<ImmA>()))
1492 return DiagnosticPredicateTy::Match;
1493 if ((Res = isExactFPImm<ImmB>()))
1494 return DiagnosticPredicateTy::Match;
1498 bool isExtend()
const {
1499 if (!isShiftExtend())
1508 getShiftExtendAmount() <= 4;
1511 bool isExtend64()
const {
1521 bool isExtendLSL64()
const {
1527 getShiftExtendAmount() <= 4;
1530 template<
int W
idth>
bool isMemXExtend()
const {
1535 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1536 getShiftExtendAmount() == 0);
1539 template<
int W
idth>
bool isMemWExtend()
const {
1544 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1545 getShiftExtendAmount() == 0);
1548 template <
unsigned w
idth>
1549 bool isArithmeticShifter()
const {
1559 template <
unsigned w
idth>
1560 bool isLogicalShifter()
const {
1568 getShiftExtendAmount() < width;
1571 bool isMovImm32Shifter()
const {
1579 uint64_t Val = getShiftExtendAmount();
1580 return (Val == 0 || Val == 16);
1583 bool isMovImm64Shifter()
const {
1591 uint64_t Val = getShiftExtendAmount();
1592 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1595 bool isLogicalVecShifter()
const {
1600 unsigned Shift = getShiftExtendAmount();
1602 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1605 bool isLogicalVecHalfWordShifter()
const {
1606 if (!isLogicalVecShifter())
1610 unsigned Shift = getShiftExtendAmount();
1612 (Shift == 0 || Shift == 8);
1615 bool isMoveVecShifter()
const {
1616 if (!isShiftExtend())
1620 unsigned Shift = getShiftExtendAmount();
1622 (Shift == 8 || Shift == 16);
1631 bool isSImm9OffsetFB()
const {
1632 return isSImm<9>() && !isUImm12Offset<
Width / 8>();
1635 bool isAdrpLabel()
const {
1642 int64_t Val =
CE->getValue();
1643 int64_t Min = - (4096 * (1LL << (21 - 1)));
1644 int64_t
Max = 4096 * ((1LL << (21 - 1)) - 1);
1645 return (Val % 4096) == 0 && Val >= Min && Val <=
Max;
1651 bool isAdrLabel()
const {
1658 int64_t Val =
CE->getValue();
1659 int64_t Min = - (1LL << (21 - 1));
1660 int64_t
Max = ((1LL << (21 - 1)) - 1);
1661 return Val >= Min && Val <=
Max;
1667 template <MatrixKind Kind,
unsigned EltSize,
unsigned RegClass>
1670 return DiagnosticPredicateTy::NoMatch;
1671 if (getMatrixKind() != Kind ||
1672 !AArch64MCRegisterClasses[RegClass].
contains(getMatrixReg()) ||
1673 EltSize != getMatrixElementWidth())
1674 return DiagnosticPredicateTy::NearMatch;
1675 return DiagnosticPredicateTy::Match;
1682 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1688 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
1689 assert(
N == 1 &&
"Invalid number of operands!");
1693 void addMatrixOperands(
MCInst &Inst,
unsigned N)
const {
1694 assert(
N == 1 &&
"Invalid number of operands!");
1698 void addGPR32as64Operands(
MCInst &Inst,
unsigned N)
const {
1699 assert(
N == 1 &&
"Invalid number of operands!");
1701 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1710 void addGPR64as32Operands(
MCInst &Inst,
unsigned N)
const {
1711 assert(
N == 1 &&
"Invalid number of operands!");
1713 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
1722 template <
int W
idth>
1723 void addFPRasZPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1726 case 8:
Base = AArch64::B0;
break;
1727 case 16:
Base = AArch64::H0;
break;
1728 case 32:
Base = AArch64::S0;
break;
1729 case 64:
Base = AArch64::D0;
break;
1730 case 128:
Base = AArch64::Q0;
break;
1737 void addVectorReg64Operands(
MCInst &Inst,
unsigned N)
const {
1738 assert(
N == 1 &&
"Invalid number of operands!");
1740 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1744 void addVectorReg128Operands(
MCInst &Inst,
unsigned N)
const {
1745 assert(
N == 1 &&
"Invalid number of operands!");
1747 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1751 void addVectorRegLoOperands(
MCInst &Inst,
unsigned N)
const {
1752 assert(
N == 1 &&
"Invalid number of operands!");
1756 enum VecListIndexType {
1757 VecListIdx_DReg = 0,
1758 VecListIdx_QReg = 1,
1759 VecListIdx_ZReg = 2,
1760 VecListIdx_PReg = 3,
1763 template <VecListIndexType RegTy,
unsigned NumRegs>
1764 void addVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1765 assert(
N == 1 &&
"Invalid number of operands!");
1766 static const unsigned FirstRegs[][5] = {
1768 AArch64::D0, AArch64::D0_D1,
1769 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1771 AArch64::Q0, AArch64::Q0_Q1,
1772 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1774 AArch64::Z0, AArch64::Z0_Z1,
1775 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1777 AArch64::P0, AArch64::P0_P1 }
1780 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1781 " NumRegs must be <= 4 for ZRegs");
1783 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1784 " NumRegs must be <= 2 for PRegs");
1786 unsigned FirstReg = FirstRegs[(
unsigned)RegTy][NumRegs];
1788 FirstRegs[(
unsigned)RegTy][0]));
1791 template <
unsigned NumRegs>
1792 void addStridedVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1793 assert(
N == 1 &&
"Invalid number of operands!");
1794 assert((NumRegs == 2 || NumRegs == 4) &&
" NumRegs must be 2 or 4");
1798 if (getVectorListStart() < AArch64::Z16) {
1799 assert((getVectorListStart() < AArch64::Z8) &&
1800 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1802 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1804 assert((getVectorListStart() < AArch64::Z24) &&
1805 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1807 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1811 if (getVectorListStart() < AArch64::Z16) {
1812 assert((getVectorListStart() < AArch64::Z4) &&
1813 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1815 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1817 assert((getVectorListStart() < AArch64::Z20) &&
1818 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1820 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1828 void addMatrixTileListOperands(
MCInst &Inst,
unsigned N)
const {
1829 assert(
N == 1 &&
"Invalid number of operands!");
1830 unsigned RegMask = getMatrixTileListRegMask();
1831 assert(RegMask <= 0xFF &&
"Invalid mask!");
1835 void addVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
1836 assert(
N == 1 &&
"Invalid number of operands!");
1840 template <
unsigned ImmIs0,
unsigned ImmIs1>
1841 void addExactFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1842 assert(
N == 1 &&
"Invalid number of operands!");
1843 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
1847 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
1848 assert(
N == 1 &&
"Invalid number of operands!");
1852 addExpr(Inst, getImm());
1855 template <
int Shift>
1856 void addImmWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1857 assert(
N == 2 &&
"Invalid number of operands!");
1858 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1861 }
else if (isShiftedImm()) {
1862 addExpr(Inst, getShiftedImmVal());
1865 addExpr(Inst, getImm());
1870 template <
int Shift>
1871 void addImmNegWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1872 assert(
N == 2 &&
"Invalid number of operands!");
1873 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1880 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
1881 assert(
N == 1 &&
"Invalid number of operands!");
1885 void addAdrpLabelOperands(
MCInst &Inst,
unsigned N)
const {
1886 assert(
N == 1 &&
"Invalid number of operands!");
1889 addExpr(Inst, getImm());
1894 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
1895 addImmOperands(Inst,
N);
1899 void addUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
1900 assert(
N == 1 &&
"Invalid number of operands!");
1910 void addUImm6Operands(
MCInst &Inst,
unsigned N)
const {
1911 assert(
N == 1 &&
"Invalid number of operands!");
1916 template <
int Scale>
1917 void addImmScaledOperands(
MCInst &Inst,
unsigned N)
const {
1918 assert(
N == 1 &&
"Invalid number of operands!");
1923 template <
int Scale>
1924 void addImmScaledRangeOperands(
MCInst &Inst,
unsigned N)
const {
1925 assert(
N == 1 &&
"Invalid number of operands!");
1929 template <
typename T>
1930 void addLogicalImmOperands(
MCInst &Inst,
unsigned N)
const {
1931 assert(
N == 1 &&
"Invalid number of operands!");
1933 std::make_unsigned_t<T> Val = MCE->
getValue();
1938 template <
typename T>
1939 void addLogicalImmNotOperands(
MCInst &Inst,
unsigned N)
const {
1940 assert(
N == 1 &&
"Invalid number of operands!");
1942 std::make_unsigned_t<T> Val = ~MCE->getValue();
1947 void addSIMDImmType10Operands(
MCInst &Inst,
unsigned N)
const {
1948 assert(
N == 1 &&
"Invalid number of operands!");
1954 void addBranchTarget26Operands(
MCInst &Inst,
unsigned N)
const {
1958 assert(
N == 1 &&
"Invalid number of operands!");
1961 addExpr(Inst, getImm());
1964 assert(MCE &&
"Invalid constant immediate operand!");
1968 void addPCRelLabel19Operands(
MCInst &Inst,
unsigned N)
const {
1972 assert(
N == 1 &&
"Invalid number of operands!");
1975 addExpr(Inst, getImm());
1978 assert(MCE &&
"Invalid constant immediate operand!");
1982 void addBranchTarget14Operands(
MCInst &Inst,
unsigned N)
const {
1986 assert(
N == 1 &&
"Invalid number of operands!");
1989 addExpr(Inst, getImm());
1992 assert(MCE &&
"Invalid constant immediate operand!");
1996 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1997 assert(
N == 1 &&
"Invalid number of operands!");
2002 void addBarrierOperands(
MCInst &Inst,
unsigned N)
const {
2003 assert(
N == 1 &&
"Invalid number of operands!");
2007 void addBarriernXSOperands(
MCInst &Inst,
unsigned N)
const {
2008 assert(
N == 1 &&
"Invalid number of operands!");
2012 void addMRSSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2013 assert(
N == 1 &&
"Invalid number of operands!");
2018 void addMSRSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2019 assert(
N == 1 &&
"Invalid number of operands!");
2024 void addSystemPStateFieldWithImm0_1Operands(
MCInst &Inst,
unsigned N)
const {
2025 assert(
N == 1 &&
"Invalid number of operands!");
2030 void addSVCROperands(
MCInst &Inst,
unsigned N)
const {
2031 assert(
N == 1 &&
"Invalid number of operands!");
2036 void addSystemPStateFieldWithImm0_15Operands(
MCInst &Inst,
unsigned N)
const {
2037 assert(
N == 1 &&
"Invalid number of operands!");
2042 void addSysCROperands(
MCInst &Inst,
unsigned N)
const {
2043 assert(
N == 1 &&
"Invalid number of operands!");
2047 void addPrefetchOperands(
MCInst &Inst,
unsigned N)
const {
2048 assert(
N == 1 &&
"Invalid number of operands!");
2052 void addPSBHintOperands(
MCInst &Inst,
unsigned N)
const {
2053 assert(
N == 1 &&
"Invalid number of operands!");
2057 void addBTIHintOperands(
MCInst &Inst,
unsigned N)
const {
2058 assert(
N == 1 &&
"Invalid number of operands!");
2062 void addShifterOperands(
MCInst &Inst,
unsigned N)
const {
2063 assert(
N == 1 &&
"Invalid number of operands!");
2069 void addSyspXzrPairOperand(
MCInst &Inst,
unsigned N)
const {
2070 assert(
N == 1 &&
"Invalid number of operands!");
2078 if (
Reg != AArch64::XZR)
2084 void addExtendOperands(
MCInst &Inst,
unsigned N)
const {
2085 assert(
N == 1 &&
"Invalid number of operands!");
2092 void addExtend64Operands(
MCInst &Inst,
unsigned N)
const {
2093 assert(
N == 1 &&
"Invalid number of operands!");
2100 void addMemExtendOperands(
MCInst &Inst,
unsigned N)
const {
2101 assert(
N == 2 &&
"Invalid number of operands!");
2112 void addMemExtend8Operands(
MCInst &Inst,
unsigned N)
const {
2113 assert(
N == 2 &&
"Invalid number of operands!");
2121 void addMOVZMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2122 assert(
N == 1 &&
"Invalid number of operands!");
2129 addExpr(Inst, getImm());
2134 void addMOVNMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2135 assert(
N == 1 &&
"Invalid number of operands!");
2142 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
2143 assert(
N == 1 &&
"Invalid number of operands!");
2148 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
2149 assert(
N == 1 &&
"Invalid number of operands!");
2156 static std::unique_ptr<AArch64Operand>
2158 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2159 Op->Tok.Data = Str.data();
2160 Op->Tok.Length = Str.size();
2161 Op->Tok.IsSuffix = IsSuffix;
2167 static std::unique_ptr<AArch64Operand>
2169 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2171 unsigned ShiftAmount = 0,
2172 unsigned HasExplicitAmount =
false) {
2173 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2174 Op->Reg.RegNum = RegNum;
2176 Op->Reg.ElementWidth = 0;
2177 Op->Reg.EqualityTy = EqTy;
2178 Op->Reg.ShiftExtend.Type = ExtTy;
2179 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2180 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2186 static std::unique_ptr<AArch64Operand>
2187 CreateVectorReg(
unsigned RegNum, RegKind Kind,
unsigned ElementWidth,
2190 unsigned ShiftAmount = 0,
2191 unsigned HasExplicitAmount =
false) {
2192 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2193 Kind == RegKind::SVEPredicateVector ||
2194 Kind == RegKind::SVEPredicateAsCounter) &&
2195 "Invalid vector kind");
2196 auto Op = CreateReg(RegNum, Kind, S,
E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2198 Op->Reg.ElementWidth = ElementWidth;
2202 static std::unique_ptr<AArch64Operand>
2203 CreateVectorList(
unsigned RegNum,
unsigned Count,
unsigned Stride,
2204 unsigned NumElements,
unsigned ElementWidth,
2206 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2207 Op->VectorList.RegNum = RegNum;
2208 Op->VectorList.Count = Count;
2209 Op->VectorList.Stride = Stride;
2210 Op->VectorList.NumElements = NumElements;
2211 Op->VectorList.ElementWidth = ElementWidth;
2212 Op->VectorList.RegisterKind = RegisterKind;
2218 static std::unique_ptr<AArch64Operand>
2220 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2221 Op->VectorIndex.Val =
Idx;
2227 static std::unique_ptr<AArch64Operand>
2229 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2230 Op->MatrixTileList.RegMask = RegMask;
2237 const unsigned ElementWidth) {
2238 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2240 {{0, AArch64::ZAB0},
2241 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2242 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2243 {{8, AArch64::ZAB0},
2244 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2245 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2246 {{16, AArch64::ZAH0},
2247 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2248 {{16, AArch64::ZAH1},
2249 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2250 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2251 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2252 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2253 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2256 if (ElementWidth == 64)
2259 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth,
Reg)];
2260 assert(!Regs.empty() &&
"Invalid tile or element width!");
2261 for (
auto OutReg : Regs)
2266 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val,
SMLoc S,
2268 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2275 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
2276 unsigned ShiftAmount,
2279 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2280 Op->ShiftedImm .Val = Val;
2281 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2287 static std::unique_ptr<AArch64Operand> CreateImmRange(
unsigned First,
2291 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2293 Op->ImmRange.Last =
Last;
2298 static std::unique_ptr<AArch64Operand>
2300 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2301 Op->CondCode.Code =
Code;
2307 static std::unique_ptr<AArch64Operand>
2309 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2311 Op->FPImm.IsExact = IsExact;
2317 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
2321 bool HasnXSModifier) {
2322 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2323 Op->Barrier.Val = Val;
2324 Op->Barrier.Data = Str.data();
2325 Op->Barrier.Length = Str.size();
2326 Op->Barrier.HasnXSModifier = HasnXSModifier;
2332 static std::unique_ptr<AArch64Operand> CreateSysReg(
StringRef Str,
SMLoc S,
2337 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2338 Op->SysReg.Data = Str.data();
2339 Op->SysReg.Length = Str.size();
2340 Op->SysReg.MRSReg = MRSReg;
2341 Op->SysReg.MSRReg = MSRReg;
2342 Op->SysReg.PStateField = PStateField;
2348 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val,
SMLoc S,
2350 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2351 Op->SysCRImm.Val = Val;
2357 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
2361 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2362 Op->Prefetch.Val = Val;
2363 Op->Barrier.Data = Str.data();
2364 Op->Barrier.Length = Str.size();
2370 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
2374 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2375 Op->PSBHint.Val = Val;
2376 Op->PSBHint.Data = Str.data();
2377 Op->PSBHint.Length = Str.size();
2383 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
2387 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2388 Op->BTIHint.Val = Val | 32;
2389 Op->BTIHint.Data = Str.data();
2390 Op->BTIHint.Length = Str.size();
2396 static std::unique_ptr<AArch64Operand>
2397 CreateMatrixRegister(
unsigned RegNum,
unsigned ElementWidth, MatrixKind Kind,
2399 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2400 Op->MatrixReg.RegNum = RegNum;
2401 Op->MatrixReg.ElementWidth = ElementWidth;
2402 Op->MatrixReg.Kind =
Kind;
2408 static std::unique_ptr<AArch64Operand>
2410 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2411 Op->SVCR.PStateField = PStateField;
2412 Op->SVCR.Data = Str.data();
2413 Op->SVCR.Length = Str.size();
2419 static std::unique_ptr<AArch64Operand>
2422 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2423 Op->ShiftExtend.Type = ShOp;
2424 Op->ShiftExtend.Amount = Val;
2425 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2437 OS <<
"<fpimm " <<
getFPImm().bitcastToAPInt().getZExtValue();
2438 if (!getFPImmIsExact())
2445 OS <<
"<barrier " <<
Name <<
">";
2447 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2453 case k_ShiftedImm: {
2454 unsigned Shift = getShiftedImmShift();
2455 OS <<
"<shiftedimm ";
2456 OS << *getShiftedImmVal();
2462 OS << getFirstImmVal();
2463 OS <<
":" << getLastImmVal() <<
">";
2469 case k_VectorList: {
2470 OS <<
"<vectorlist ";
2471 unsigned Reg = getVectorListStart();
2472 for (
unsigned i = 0, e = getVectorListCount(); i !=
e; ++i)
2473 OS <<
Reg + i * getVectorListStride() <<
" ";
2478 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2481 OS <<
"<sysreg: " << getSysReg() <<
'>';
2484 OS <<
"'" << getToken() <<
"'";
2487 OS <<
"c" << getSysCR();
2492 OS <<
"<prfop " <<
Name <<
">";
2494 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2498 OS << getPSBHintName();
2501 OS << getBTIHintName();
2503 case k_MatrixRegister:
2504 OS <<
"<matrix " << getMatrixReg() <<
">";
2506 case k_MatrixTileList: {
2507 OS <<
"<matrixlist ";
2508 unsigned RegMask = getMatrixTileListRegMask();
2509 unsigned MaxBits = 8;
2510 for (
unsigned I = MaxBits;
I > 0; --
I)
2511 OS << ((RegMask & (1 << (
I - 1))) >> (
I - 1));
2520 OS <<
"<register " <<
getReg() <<
">";
2521 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2526 << getShiftExtendAmount();
2527 if (!hasShiftExtendAmount())
2543 .
Case(
"v0", AArch64::Q0)
2544 .
Case(
"v1", AArch64::Q1)
2545 .
Case(
"v2", AArch64::Q2)
2546 .
Case(
"v3", AArch64::Q3)
2547 .
Case(
"v4", AArch64::Q4)
2548 .
Case(
"v5", AArch64::Q5)
2549 .
Case(
"v6", AArch64::Q6)
2550 .
Case(
"v7", AArch64::Q7)
2551 .
Case(
"v8", AArch64::Q8)
2552 .
Case(
"v9", AArch64::Q9)
2553 .
Case(
"v10", AArch64::Q10)
2554 .
Case(
"v11", AArch64::Q11)
2555 .
Case(
"v12", AArch64::Q12)
2556 .
Case(
"v13", AArch64::Q13)
2557 .
Case(
"v14", AArch64::Q14)
2558 .
Case(
"v15", AArch64::Q15)
2559 .
Case(
"v16", AArch64::Q16)
2560 .
Case(
"v17", AArch64::Q17)
2561 .
Case(
"v18", AArch64::Q18)
2562 .
Case(
"v19", AArch64::Q19)
2563 .
Case(
"v20", AArch64::Q20)
2564 .
Case(
"v21", AArch64::Q21)
2565 .
Case(
"v22", AArch64::Q22)
2566 .
Case(
"v23", AArch64::Q23)
2567 .
Case(
"v24", AArch64::Q24)
2568 .
Case(
"v25", AArch64::Q25)
2569 .
Case(
"v26", AArch64::Q26)
2570 .
Case(
"v27", AArch64::Q27)
2571 .
Case(
"v28", AArch64::Q28)
2572 .
Case(
"v29", AArch64::Q29)
2573 .
Case(
"v30", AArch64::Q30)
2574 .
Case(
"v31", AArch64::Q31)
2583 RegKind VectorKind) {
2584 std::pair<int, int> Res = {-1, -1};
2586 switch (VectorKind) {
2587 case RegKind::NeonVector:
2591 .Case(
".1d", {1, 64})
2592 .Case(
".1q", {1, 128})
2594 .Case(
".2h", {2, 16})
2595 .Case(
".2s", {2, 32})
2596 .Case(
".2d", {2, 64})
2599 .Case(
".4b", {4, 8})
2600 .Case(
".4h", {4, 16})
2601 .Case(
".4s", {4, 32})
2602 .Case(
".8b", {8, 8})
2603 .Case(
".8h", {8, 16})
2604 .Case(
".16b", {16, 8})
2609 .Case(
".h", {0, 16})
2610 .Case(
".s", {0, 32})
2611 .Case(
".d", {0, 64})
2614 case RegKind::SVEPredicateAsCounter:
2615 case RegKind::SVEPredicateVector:
2616 case RegKind::SVEDataVector:
2617 case RegKind::Matrix:
2621 .Case(
".h", {0, 16})
2622 .Case(
".s", {0, 32})
2623 .Case(
".d", {0, 64})
2624 .Case(
".q", {0, 128})
2631 if (Res == std::make_pair(-1, -1))
2632 return std::nullopt;
2634 return std::optional<std::pair<int, int>>(Res);
2643 .
Case(
"z0", AArch64::Z0)
2644 .
Case(
"z1", AArch64::Z1)
2645 .
Case(
"z2", AArch64::Z2)
2646 .
Case(
"z3", AArch64::Z3)
2647 .
Case(
"z4", AArch64::Z4)
2648 .
Case(
"z5", AArch64::Z5)
2649 .
Case(
"z6", AArch64::Z6)
2650 .
Case(
"z7", AArch64::Z7)
2651 .
Case(
"z8", AArch64::Z8)
2652 .
Case(
"z9", AArch64::Z9)
2653 .
Case(
"z10", AArch64::Z10)
2654 .
Case(
"z11", AArch64::Z11)
2655 .
Case(
"z12", AArch64::Z12)
2656 .
Case(
"z13", AArch64::Z13)
2657 .
Case(
"z14", AArch64::Z14)
2658 .
Case(
"z15", AArch64::Z15)
2659 .
Case(
"z16", AArch64::Z16)
2660 .
Case(
"z17", AArch64::Z17)
2661 .
Case(
"z18", AArch64::Z18)
2662 .
Case(
"z19", AArch64::Z19)
2663 .
Case(
"z20", AArch64::Z20)
2664 .
Case(
"z21", AArch64::Z21)
2665 .
Case(
"z22", AArch64::Z22)
2666 .
Case(
"z23", AArch64::Z23)
2667 .
Case(
"z24", AArch64::Z24)
2668 .
Case(
"z25", AArch64::Z25)
2669 .
Case(
"z26", AArch64::Z26)
2670 .
Case(
"z27", AArch64::Z27)
2671 .
Case(
"z28", AArch64::Z28)
2672 .
Case(
"z29", AArch64::Z29)
2673 .
Case(
"z30", AArch64::Z30)
2674 .
Case(
"z31", AArch64::Z31)
2680 .
Case(
"p0", AArch64::P0)
2681 .
Case(
"p1", AArch64::P1)
2682 .
Case(
"p2", AArch64::P2)
2683 .
Case(
"p3", AArch64::P3)
2684 .
Case(
"p4", AArch64::P4)
2685 .
Case(
"p5", AArch64::P5)
2686 .
Case(
"p6", AArch64::P6)
2687 .
Case(
"p7", AArch64::P7)
2688 .
Case(
"p8", AArch64::P8)
2689 .
Case(
"p9", AArch64::P9)
2690 .
Case(
"p10", AArch64::P10)
2691 .
Case(
"p11", AArch64::P11)
2692 .
Case(
"p12", AArch64::P12)
2693 .
Case(
"p13", AArch64::P13)
2694 .
Case(
"p14", AArch64::P14)
2695 .
Case(
"p15", AArch64::P15)
2701 .
Case(
"pn0", AArch64::P0)
2702 .
Case(
"pn1", AArch64::P1)
2703 .
Case(
"pn2", AArch64::P2)
2704 .
Case(
"pn3", AArch64::P3)
2705 .
Case(
"pn4", AArch64::P4)
2706 .
Case(
"pn5", AArch64::P5)
2707 .
Case(
"pn6", AArch64::P6)
2708 .
Case(
"pn7", AArch64::P7)
2709 .
Case(
"pn8", AArch64::P8)
2710 .
Case(
"pn9", AArch64::P9)
2711 .
Case(
"pn10", AArch64::P10)
2712 .
Case(
"pn11", AArch64::P11)
2713 .
Case(
"pn12", AArch64::P12)
2714 .
Case(
"pn13", AArch64::P13)
2715 .
Case(
"pn14", AArch64::P14)
2716 .
Case(
"pn15", AArch64::P15)
2722 .
Case(
"za0.d", AArch64::ZAD0)
2723 .
Case(
"za1.d", AArch64::ZAD1)
2724 .
Case(
"za2.d", AArch64::ZAD2)
2725 .
Case(
"za3.d", AArch64::ZAD3)
2726 .
Case(
"za4.d", AArch64::ZAD4)
2727 .
Case(
"za5.d", AArch64::ZAD5)
2728 .
Case(
"za6.d", AArch64::ZAD6)
2729 .
Case(
"za7.d", AArch64::ZAD7)
2730 .
Case(
"za0.s", AArch64::ZAS0)
2731 .
Case(
"za1.s", AArch64::ZAS1)
2732 .
Case(
"za2.s", AArch64::ZAS2)
2733 .
Case(
"za3.s", AArch64::ZAS3)
2734 .
Case(
"za0.h", AArch64::ZAH0)
2735 .
Case(
"za1.h", AArch64::ZAH1)
2736 .
Case(
"za0.b", AArch64::ZAB0)
2742 .
Case(
"za", AArch64::ZA)
2743 .
Case(
"za0.q", AArch64::ZAQ0)
2744 .
Case(
"za1.q", AArch64::ZAQ1)
2745 .
Case(
"za2.q", AArch64::ZAQ2)
2746 .
Case(
"za3.q", AArch64::ZAQ3)
2747 .
Case(
"za4.q", AArch64::ZAQ4)
2748 .
Case(
"za5.q", AArch64::ZAQ5)
2749 .
Case(
"za6.q", AArch64::ZAQ6)
2750 .
Case(
"za7.q", AArch64::ZAQ7)
2751 .
Case(
"za8.q", AArch64::ZAQ8)
2752 .
Case(
"za9.q", AArch64::ZAQ9)
2753 .
Case(
"za10.q", AArch64::ZAQ10)
2754 .
Case(
"za11.q", AArch64::ZAQ11)
2755 .
Case(
"za12.q", AArch64::ZAQ12)
2756 .
Case(
"za13.q", AArch64::ZAQ13)
2757 .
Case(
"za14.q", AArch64::ZAQ14)
2758 .
Case(
"za15.q", AArch64::ZAQ15)
2759 .
Case(
"za0.d", AArch64::ZAD0)
2760 .
Case(
"za1.d", AArch64::ZAD1)
2761 .
Case(
"za2.d", AArch64::ZAD2)
2762 .
Case(
"za3.d", AArch64::ZAD3)
2763 .
Case(
"za4.d", AArch64::ZAD4)
2764 .
Case(
"za5.d", AArch64::ZAD5)
2765 .
Case(
"za6.d", AArch64::ZAD6)
2766 .
Case(
"za7.d", AArch64::ZAD7)
2767 .
Case(
"za0.s", AArch64::ZAS0)
2768 .
Case(
"za1.s", AArch64::ZAS1)
2769 .
Case(
"za2.s", AArch64::ZAS2)
2770 .
Case(
"za3.s", AArch64::ZAS3)
2771 .
Case(
"za0.h", AArch64::ZAH0)
2772 .
Case(
"za1.h", AArch64::ZAH1)
2773 .
Case(
"za0.b", AArch64::ZAB0)
2774 .
Case(
"za0h.q", AArch64::ZAQ0)
2775 .
Case(
"za1h.q", AArch64::ZAQ1)
2776 .
Case(
"za2h.q", AArch64::ZAQ2)
2777 .
Case(
"za3h.q", AArch64::ZAQ3)
2778 .
Case(
"za4h.q", AArch64::ZAQ4)
2779 .
Case(
"za5h.q", AArch64::ZAQ5)
2780 .
Case(
"za6h.q", AArch64::ZAQ6)
2781 .
Case(
"za7h.q", AArch64::ZAQ7)
2782 .
Case(
"za8h.q", AArch64::ZAQ8)
2783 .
Case(
"za9h.q", AArch64::ZAQ9)
2784 .
Case(
"za10h.q", AArch64::ZAQ10)
2785 .
Case(
"za11h.q", AArch64::ZAQ11)
2786 .
Case(
"za12h.q", AArch64::ZAQ12)
2787 .
Case(
"za13h.q", AArch64::ZAQ13)
2788 .
Case(
"za14h.q", AArch64::ZAQ14)
2789 .
Case(
"za15h.q", AArch64::ZAQ15)
2790 .
Case(
"za0h.d", AArch64::ZAD0)
2791 .
Case(
"za1h.d", AArch64::ZAD1)
2792 .
Case(
"za2h.d", AArch64::ZAD2)
2793 .
Case(
"za3h.d", AArch64::ZAD3)
2794 .
Case(
"za4h.d", AArch64::ZAD4)
2795 .
Case(
"za5h.d", AArch64::ZAD5)
2796 .
Case(
"za6h.d", AArch64::ZAD6)
2797 .
Case(
"za7h.d", AArch64::ZAD7)
2798 .
Case(
"za0h.s", AArch64::ZAS0)
2799 .
Case(
"za1h.s", AArch64::ZAS1)
2800 .
Case(
"za2h.s", AArch64::ZAS2)
2801 .
Case(
"za3h.s", AArch64::ZAS3)
2802 .
Case(
"za0h.h", AArch64::ZAH0)
2803 .
Case(
"za1h.h", AArch64::ZAH1)
2804 .
Case(
"za0h.b", AArch64::ZAB0)
2805 .
Case(
"za0v.q", AArch64::ZAQ0)
2806 .
Case(
"za1v.q", AArch64::ZAQ1)
2807 .
Case(
"za2v.q", AArch64::ZAQ2)
2808 .
Case(
"za3v.q", AArch64::ZAQ3)
2809 .
Case(
"za4v.q", AArch64::ZAQ4)
2810 .
Case(
"za5v.q", AArch64::ZAQ5)
2811 .
Case(
"za6v.q", AArch64::ZAQ6)
2812 .
Case(
"za7v.q", AArch64::ZAQ7)
2813 .
Case(
"za8v.q", AArch64::ZAQ8)
2814 .
Case(
"za9v.q", AArch64::ZAQ9)
2815 .
Case(
"za10v.q", AArch64::ZAQ10)
2816 .
Case(
"za11v.q", AArch64::ZAQ11)
2817 .
Case(
"za12v.q", AArch64::ZAQ12)
2818 .
Case(
"za13v.q", AArch64::ZAQ13)
2819 .
Case(
"za14v.q", AArch64::ZAQ14)
2820 .
Case(
"za15v.q", AArch64::ZAQ15)
2821 .
Case(
"za0v.d", AArch64::ZAD0)
2822 .
Case(
"za1v.d", AArch64::ZAD1)
2823 .
Case(
"za2v.d", AArch64::ZAD2)
2824 .
Case(
"za3v.d", AArch64::ZAD3)
2825 .
Case(
"za4v.d", AArch64::ZAD4)
2826 .
Case(
"za5v.d", AArch64::ZAD5)
2827 .
Case(
"za6v.d", AArch64::ZAD6)
2828 .
Case(
"za7v.d", AArch64::ZAD7)
2829 .
Case(
"za0v.s", AArch64::ZAS0)
2830 .
Case(
"za1v.s", AArch64::ZAS1)
2831 .
Case(
"za2v.s", AArch64::ZAS2)
2832 .
Case(
"za3v.s", AArch64::ZAS3)
2833 .
Case(
"za0v.h", AArch64::ZAH0)
2834 .
Case(
"za1v.h", AArch64::ZAH1)
2835 .
Case(
"za0v.b", AArch64::ZAB0)
2839bool AArch64AsmParser::parseRegister(
MCRegister &RegNo,
SMLoc &StartLoc,
2847 StartLoc = getLoc();
2848 auto Res = tryParseScalarRegister(RegNo);
2854unsigned AArch64AsmParser::matchRegisterNameAlias(
StringRef Name,
2856 unsigned RegNum = 0;
2858 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2861 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2864 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2867 return Kind == RegKind::NeonVector ? RegNum : 0;
2870 return Kind == RegKind::Matrix ? RegNum : 0;
2872 if (
Name.equals_insensitive(
"zt0"))
2873 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2877 return (Kind == RegKind::Scalar) ? RegNum : 0;
2882 .
Case(
"fp", AArch64::FP)
2883 .
Case(
"lr", AArch64::LR)
2884 .
Case(
"x31", AArch64::XZR)
2885 .
Case(
"w31", AArch64::WZR)
2887 return Kind == RegKind::Scalar ? RegNum : 0;
2892 auto Entry = RegisterReqs.
find(
Name.lower());
2893 if (Entry == RegisterReqs.
end())
2897 if (Kind == Entry->getValue().first)
2898 RegNum = Entry->getValue().second;
2903unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2905 case RegKind::Scalar:
2906 case RegKind::NeonVector:
2907 case RegKind::SVEDataVector:
2909 case RegKind::Matrix:
2910 case RegKind::SVEPredicateVector:
2911 case RegKind::SVEPredicateAsCounter:
2913 case RegKind::LookupTable:
2923AArch64AsmParser::tryParseScalarRegister(
MCRegister &RegNum) {
2929 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2944 Error(S,
"Expected cN operand where 0 <= N <= 15");
2949 if (Tok[0] !=
'c' && Tok[0] !=
'C') {
2950 Error(S,
"Expected cN operand where 0 <= N <= 15");
2956 if (BadNum || CRNum > 15) {
2957 Error(S,
"Expected cN operand where 0 <= N <= 15");
2963 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2973 unsigned MaxVal = 63;
2979 if (getParser().parseExpression(ImmVal))
2984 TokError(
"immediate value expected for prefetch operand");
2988 if (prfop > MaxVal) {
2989 TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
2994 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->
getValue());
2995 Operands.push_back(AArch64Operand::CreatePrefetch(
2996 prfop, RPRFM ? RPRFM->Name :
"", S, getContext()));
3001 TokError(
"prefetch hint expected");
3005 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.
getString());
3007 TokError(
"prefetch hint expected");
3011 Operands.push_back(AArch64Operand::CreatePrefetch(
3012 RPRFM->Encoding, Tok.
getString(), S, getContext()));
3018template <
bool IsSVEPrefetch>
3025 if (IsSVEPrefetch) {
3026 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
3027 return std::optional<unsigned>(Res->Encoding);
3028 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
3029 return std::optional<unsigned>(Res->Encoding);
3030 return std::optional<unsigned>();
3033 auto LookupByEncoding = [](
unsigned E) {
3034 if (IsSVEPrefetch) {
3035 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
3036 return std::optional<StringRef>(Res->Name);
3037 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
3038 return std::optional<StringRef>(Res->Name);
3039 return std::optional<StringRef>();
3041 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3048 if (getParser().parseExpression(ImmVal))
3053 TokError(
"immediate value expected for prefetch operand");
3057 if (prfop > MaxVal) {
3058 TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
3063 auto PRFM = LookupByEncoding(MCE->
getValue());
3064 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(
""),
3070 TokError(
"prefetch hint expected");
3074 auto PRFM = LookupByName(Tok.
getString());
3076 TokError(
"prefetch hint expected");
3080 Operands.push_back(AArch64Operand::CreatePrefetch(
3081 *PRFM, Tok.
getString(), S, getContext()));
3092 TokError(
"invalid operand for instruction");
3096 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
3098 TokError(
"invalid operand for instruction");
3102 Operands.push_back(AArch64Operand::CreatePSBHint(
3103 PSB->Encoding, Tok.
getString(), S, getContext()));
3110 SMLoc StartLoc = getLoc();
3116 auto RegTok = getTok();
3120 if (RegNum != AArch64::XZR) {
3121 getLexer().UnLex(RegTok);
3129 TokError(
"expected register operand");
3133 if (RegNum != AArch64::XZR) {
3134 TokError(
"xzr must be followed by xzr");
3140 Operands.push_back(AArch64Operand::CreateReg(
3141 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3152 TokError(
"invalid operand for instruction");
3156 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
3158 TokError(
"invalid operand for instruction");
3162 Operands.push_back(AArch64Operand::CreateBTIHint(
3163 BTI->Encoding, Tok.
getString(), S, getContext()));
3173 const MCExpr *Expr =
nullptr;
3179 if (parseSymbolicImmVal(Expr))
3185 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3195 Error(S,
"gotpage label reference not allowed an addend");
3206 Error(S,
"page or gotpage label reference expected");
3215 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
3225 const MCExpr *Expr =
nullptr;
3234 if (parseSymbolicImmVal(Expr))
3240 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3247 Error(S,
"unexpected adr label");
3253 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
3258template<
bool AddFPZeroAsLiteral>
3272 TokError(
"invalid floating point immediate");
3278 if (Tok.
getIntVal() > 255 || isNegative) {
3279 TokError(
"encoded floating point value out of range");
3285 AArch64Operand::CreateFPImm(
F,
true, S, getContext()));
3288 APFloat RealVal(APFloat::IEEEdouble());
3290 RealVal.convertFromString(Tok.
getString(), APFloat::rmTowardZero);
3292 TokError(
"invalid floating point representation");
3297 RealVal.changeSign();
3299 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3300 Operands.push_back(AArch64Operand::CreateToken(
"#0", S, getContext()));
3301 Operands.push_back(AArch64Operand::CreateToken(
".0", S, getContext()));
3303 Operands.push_back(AArch64Operand::CreateFPImm(
3304 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3329 if (parseSymbolicImmVal(Imm))
3333 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3340 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
3342 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3344 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3350 !getTok().getIdentifier().equals_insensitive(
"lsl")) {
3351 Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3361 Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3365 int64_t ShiftAmount = getTok().getIntVal();
3367 if (ShiftAmount < 0) {
3368 Error(getLoc(),
"positive shift amount required");
3374 if (ShiftAmount == 0 && Imm !=
nullptr) {
3376 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3380 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3381 getLoc(), getContext()));
3388AArch64AsmParser::parseCondCodeString(
StringRef Cond, std::string &Suggestion) {
3425 Suggestion =
"nfrst";
3432 bool invertCondCode) {
3438 std::string Suggestion;
3441 std::string
Msg =
"invalid condition code";
3442 if (!Suggestion.empty())
3443 Msg +=
", did you mean " + Suggestion +
"?";
3444 return TokError(Msg);
3448 if (invertCondCode) {
3450 return TokError(
"condition codes AL and NV are invalid for this instruction");
3455 AArch64Operand::CreateCondCode(
CC, S, getLoc(), getContext()));
3465 TokError(
"invalid operand for instruction");
3469 unsigned PStateImm = -1;
3470 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.
getString());
3473 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3474 PStateImm = SVCR->Encoding;
3477 AArch64Operand::CreateSVCR(PStateImm, Tok.
getString(), S, getContext()));
3489 if (
Name.equals_insensitive(
"za") ||
Name.startswith_insensitive(
"za.")) {
3491 unsigned ElementWidth = 0;
3492 auto DotPosition =
Name.find(
'.');
3494 const auto &KindRes =
3498 "Expected the register to be followed by element width suffix");
3501 ElementWidth = KindRes->second;
3503 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3504 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3509 if (parseOperand(
Operands,
false,
false))
3516 unsigned Reg = matchRegisterNameAlias(
Name, RegKind::Matrix);
3520 size_t DotPosition =
Name.find(
'.');
3528 .
Case(
"h", MatrixKind::Row)
3529 .
Case(
"v", MatrixKind::Col)
3535 TokError(
"Expected the register to be followed by element width suffix");
3538 unsigned ElementWidth = KindRes->second;
3542 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3543 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3548 if (parseOperand(
Operands,
false,
false))
3590 TokError(
"expected #imm after shift specifier");
3597 AArch64Operand::CreateShiftExtend(ShOp, 0,
false, S,
E, getContext()));
3606 Error(
E,
"expected integer shift amount");
3611 if (getParser().parseExpression(ImmVal))
3616 Error(
E,
"expected constant '#imm' after shift specifier");
3621 Operands.push_back(AArch64Operand::CreateShiftExtend(
3622 ShOp, MCE->
getValue(),
true, S,
E, getContext()));
3630 {
"crc", {AArch64::FeatureCRC}},
3631 {
"sm4", {AArch64::FeatureSM4}},
3632 {
"sha3", {AArch64::FeatureSHA3}},
3633 {
"sha2", {AArch64::FeatureSHA2}},
3634 {
"aes", {AArch64::FeatureAES}},
3635 {
"crypto", {AArch64::FeatureCrypto}},
3636 {
"fp", {AArch64::FeatureFPARMv8}},
3637 {
"simd", {AArch64::FeatureNEON}},
3638 {
"ras", {AArch64::FeatureRAS}},
3639 {
"rasv2", {AArch64::FeatureRASv2}},
3640 {
"lse", {AArch64::FeatureLSE}},
3641 {
"predres", {AArch64::FeaturePredRes}},
3642 {
"predres2", {AArch64::FeatureSPECRES2}},
3643 {
"ccdp", {AArch64::FeatureCacheDeepPersist}},
3644 {
"mte", {AArch64::FeatureMTE}},
3645 {
"memtag", {AArch64::FeatureMTE}},
3646 {
"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3647 {
"pan", {AArch64::FeaturePAN}},
3648 {
"pan-rwv", {AArch64::FeaturePAN_RWV}},
3649 {
"ccpp", {AArch64::FeatureCCPP}},
3650 {
"rcpc", {AArch64::FeatureRCPC}},
3651 {
"rng", {AArch64::FeatureRandGen}},
3652 {
"sve", {AArch64::FeatureSVE}},
3653 {
"sve2", {AArch64::FeatureSVE2}},
3654 {
"sve2-aes", {AArch64::FeatureSVE2AES}},
3655 {
"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3656 {
"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3657 {
"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3658 {
"sve2p1", {AArch64::FeatureSVE2p1}},
3659 {
"b16b16", {AArch64::FeatureB16B16}},
3660 {
"ls64", {AArch64::FeatureLS64}},
3661 {
"xs", {AArch64::FeatureXS}},
3662 {
"pauth", {AArch64::FeaturePAuth}},
3663 {
"flagm", {AArch64::FeatureFlagM}},
3664 {
"rme", {AArch64::FeatureRME}},
3665 {
"sme", {AArch64::FeatureSME}},
3666 {
"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3667 {
"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3668 {
"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3669 {
"sme2", {AArch64::FeatureSME2}},
3670 {
"sme2p1", {AArch64::FeatureSME2p1}},
3671 {
"hbc", {AArch64::FeatureHBC}},
3672 {
"mops", {AArch64::FeatureMOPS}},
3673 {
"mec", {AArch64::FeatureMEC}},
3674 {
"the", {AArch64::FeatureTHE}},
3675 {
"d128", {AArch64::FeatureD128}},
3676 {
"lse128", {AArch64::FeatureLSE128}},
3677 {
"ite", {AArch64::FeatureITE}},
3678 {
"cssc", {AArch64::FeatureCSSC}},
3679 {
"rcpc3", {AArch64::FeatureRCPC3}},
3680 {
"gcs", {AArch64::FeatureGCS}},
3688 if (FBS[AArch64::HasV8_0aOps])
3690 if (FBS[AArch64::HasV8_1aOps])
3692 else if (FBS[AArch64::HasV8_2aOps])
3694 else if (FBS[AArch64::HasV8_3aOps])
3696 else if (FBS[AArch64::HasV8_4aOps])
3698 else if (FBS[AArch64::HasV8_5aOps])
3700 else if (FBS[AArch64::HasV8_6aOps])
3702 else if (FBS[AArch64::HasV8_7aOps])
3704 else if (FBS[AArch64::HasV8_8aOps])
3706 else if (FBS[AArch64::HasV8_9aOps])
3708 else if (FBS[AArch64::HasV9_0aOps])
3710 else if (FBS[AArch64::HasV9_1aOps])
3712 else if (FBS[AArch64::HasV9_2aOps])
3714 else if (FBS[AArch64::HasV9_3aOps])
3716 else if (FBS[AArch64::HasV9_4aOps])
3718 else if (FBS[AArch64::HasV8_0rOps])
3727 Str += !ExtMatches.
empty() ? llvm::join(ExtMatches,
", ") :
"(unknown)";
3734 const uint16_t Cm = (Encoding & 0x78) >> 3;
3735 const uint16_t Cn = (Encoding & 0x780) >> 7;
3736 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3741 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3743 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3745 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3748 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3755 if (
Name.contains(
'.'))
3756 return TokError(
"invalid operand");
3759 Operands.push_back(AArch64Operand::CreateToken(
"sys", NameLoc, getContext()));
3765 if (Mnemonic ==
"ic") {
3768 return TokError(
"invalid operand for IC instruction");
3769 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
3770 std::string Str(
"IC " + std::string(IC->
Name) +
" requires: ");
3772 return TokError(Str);
3775 }
else if (Mnemonic ==
"dc") {
3778 return TokError(
"invalid operand for DC instruction");
3779 else if (!DC->
haveFeatures(getSTI().getFeatureBits())) {
3780 std::string Str(
"DC " + std::string(DC->
Name) +
" requires: ");
3782 return TokError(Str);
3785 }
else if (Mnemonic ==
"at") {
3788 return TokError(
"invalid operand for AT instruction");
3789 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
3790 std::string Str(
"AT " + std::string(AT->
Name) +
" requires: ");
3792 return TokError(Str);
3795 }
else if (Mnemonic ==
"tlbi") {
3798 return TokError(
"invalid operand for TLBI instruction");
3799 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
3800 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires: ");
3802 return TokError(Str);
3805 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp" || Mnemonic ==
"cosp") {
3807 if (
Op.lower() !=
"rctx")
3808 return TokError(
"invalid operand for prediction restriction instruction");
3810 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3811 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3812 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3814 if (Mnemonic ==
"cosp" && !hasSpecres2)
3815 return TokError(
"COSP requires: predres2");
3817 return TokError(Mnemonic.
upper() +
"RCTX requires: predres");
3819 uint16_t PRCTX_Op2 = Mnemonic ==
"cfp" ? 0b100
3820 : Mnemonic ==
"dvp" ? 0b101
3821 : Mnemonic ==
"cosp" ? 0b110
3822 : Mnemonic ==
"cpp" ? 0b111
3825 "Invalid mnemonic for prediction restriction instruction");
3826 const auto SYS_3_7_3 = 0b01101110011;
3827 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3829 createSysAlias(Encoding,
Operands, S);
3835 bool HasRegister =
false;
3840 return TokError(
"expected register operand");
3844 if (ExpectRegister && !HasRegister)
3845 return TokError(
"specified " + Mnemonic +
" op requires a register");
3846 else if (!ExpectRegister && HasRegister)
3847 return TokError(
"specified " + Mnemonic +
" op does not use a register");
3859 if (
Name.contains(
'.'))
3860 return TokError(
"invalid operand");
3864 AArch64Operand::CreateToken(
"sysp", NameLoc, getContext()));
3870 if (Mnemonic ==
"tlbip") {
3871 bool HasnXSQualifier =
Op.endswith_insensitive(
"nXS");
3872 if (HasnXSQualifier) {
3873 Op =
Op.drop_back(3);
3877 return TokError(
"invalid operand for TLBIP instruction");
3879 TLBIorig->
Name, TLBIorig->
Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3886 std::string(TLBI.
Name) + (HasnXSQualifier ?
"nXS" :
"");
3887 std::string Str(
"TLBIP " +
Name +
" requires: ");
3889 return TokError(Str);
3900 return TokError(
"expected register identifier");
3905 return TokError(
"specified " + Mnemonic +
3906 " op requires a pair of registers");
3920 TokError(
"'csync' operand expected");
3925 SMLoc ExprLoc = getLoc();
3927 if (getParser().parseExpression(ImmVal))
3931 Error(ExprLoc,
"immediate value expected for barrier operand");
3935 if (Mnemonic ==
"dsb" &&
Value > 15) {
3942 if (Value < 0 || Value > 15) {
3943 Error(ExprLoc,
"barrier operand out of range");
3946 auto DB = AArch64DB::lookupDBByEncoding(
Value);
3947 Operands.push_back(AArch64Operand::CreateBarrier(
Value, DB ?
DB->Name :
"",
3948 ExprLoc, getContext(),
3954 TokError(
"invalid operand for instruction");
3959 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3960 auto DB = AArch64DB::lookupDBByName(Operand);
3962 if (Mnemonic ==
"isb" && (!DB ||
DB->Encoding != AArch64DB::sy)) {
3963 TokError(
"'sy' or #imm operand expected");
3966 }
else if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3967 TokError(
"'csync' operand expected");
3969 }
else if (!DB && !TSB) {
3970 if (Mnemonic ==
"dsb") {
3975 TokError(
"invalid barrier option name");
3979 Operands.push_back(AArch64Operand::CreateBarrier(
3980 DB ?
DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(),
3981 getContext(),
false ));
3991 assert(Mnemonic ==
"dsb" &&
"Instruction does not accept nXS operands");
3992 if (Mnemonic !=
"dsb")
3998 SMLoc ExprLoc = getLoc();
3999 if (getParser().parseExpression(ImmVal))
4003 Error(ExprLoc,
"immediate value expected for barrier operand");
4010 Error(ExprLoc,
"barrier operand out of range");
4013 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(
Value);
4014 Operands.push_back(AArch64Operand::CreateBarrier(
DB->Encoding,
DB->Name,
4015 ExprLoc, getContext(),
4021 TokError(
"invalid operand for instruction");
4026 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4029 TokError(
"invalid barrier option name");
4034 AArch64Operand::CreateBarrier(
DB->Encoding, Tok.
getString(), getLoc(),
4035 getContext(),
true ));
4048 if (AArch64SVCR::lookupSVCRByName(Tok.
getString()))
4053 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4054 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4055 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4059 unsigned PStateImm = -1;
4060 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.
getString());
4061 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4062 PStateImm = PState15->Encoding;
4064 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.
getString());
4065 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4066 PStateImm = PState1->Encoding;
4070 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
4071 PStateImm, getContext()));
4087 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4095 unsigned ElementWidth = KindRes->second;
4097 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4098 S, getLoc(), getContext()));
4103 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4110 SMLoc SIdx = getLoc();
4113 if (getParser().parseExpression(ImmVal))
4117 TokError(
"immediate value expected for vector index");
4126 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->
getValue(), SIdx,
4139 RegKind MatchKind) {
4148 size_t Start = 0, Next =
Name.find(
'.');
4150 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4156 TokError(
"invalid vector kind qualifier");
4173 const SMLoc S = getLoc();
4176 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4184 unsigned ElementWidth = KindRes->second;
4185 Operands.push_back(AArch64Operand::CreateVectorReg(
4186 RegNum, RK, ElementWidth, S,
4187 getLoc(), getContext()));
4190 if (RK == RegKind::SVEPredicateAsCounter) {
4197 if (parseOperand(
Operands,
false,
false))
4207 if (!
Kind.empty()) {
4208 Error(S,
"not expecting size suffix");
4213 Operands.push_back(AArch64Operand::CreateToken(
"/", getLoc(), getContext()));
4218 auto Pred = getTok().getString().lower();
4219 if (RK == RegKind::SVEPredicateAsCounter && Pred !=
"z") {
4220 Error(getLoc(),
"expecting 'z' predication");
4224 if (RK == RegKind::SVEPredicateVector && Pred !=
"z" && Pred !=
"m") {
4225 Error(getLoc(),
"expecting 'm' or 'z' predication");
4230 const char *ZM = Pred ==
"z" ?
"z" :
"m";
4231 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4240 if (!tryParseNeonVectorRegister(
Operands))
4253bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
4254 bool HasELFModifier =
false;
4258 HasELFModifier =
true;
4261 return TokError(
"expect relocation specifier in operand after ':'");
4263 std::string LowerCase = getTok().getIdentifier().lower();
4314 return TokError(
"expect relocation specifier in operand after ':'");
4318 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
4322 if (getParser().parseExpression(ImmVal))
4336 auto ParseMatrixTile = [
this](
unsigned &
Reg,
unsigned &ElementWidth) {
4338 size_t DotPosition =
Name.find(
'.');
4347 const std::optional<std::pair<int, int>> &KindRes =
4350 TokError(
"Expected the register to be followed by element width suffix");
4353 ElementWidth = KindRes->second;
4360 auto LCurly = getTok();
4365 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4366 0, S, getLoc(), getContext()));
4371 if (getTok().getString().equals_insensitive(
"za")) {
4377 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4378 0xFF, S, getLoc(), getContext()));
4382 SMLoc TileLoc = getLoc();
4384 unsigned FirstReg, ElementWidth;
4385 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4387 getLexer().UnLex(LCurly);
4393 unsigned PrevReg = FirstReg;
4396 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4399 SeenRegs.
insert(FirstReg);
4403 unsigned Reg, NextElementWidth;
4404 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4409 if (ElementWidth != NextElementWidth) {
4410 Error(TileLoc,
"mismatched register size suffix");
4415 Warning(TileLoc,
"tile list not in ascending order");
4418 Warning(TileLoc,
"duplicate tile in list");
4421 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4430 unsigned RegMask = 0;
4431 for (
auto Reg : DRegs)
4435 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4440template <RegKind VectorKind>
4450 bool NoMatchIsError) {
4451 auto RegTok = getTok();
4452 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4460 RegTok.getString().equals_insensitive(
"zt0"))
4466 !RegTok.getString().startswith_insensitive(
"za"))) {
4467 Error(Loc,
"vector register expected");
4474 int NumRegs = getNumRegsForRegKind(VectorKind);
4476 auto LCurly = getTok();
4481 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4491 int64_t PrevReg = FirstReg;
4496 SMLoc Loc = getLoc();
4500 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4505 if (Kind != NextKind) {
4506 Error(Loc,
"mismatched register size suffix");
4511 (PrevReg <
Reg) ? (Reg - PrevReg) : (
Reg + NumRegs - PrevReg);
4513 if (Space == 0 || Space > 3) {
4514 Error(Loc,
"invalid number of vectors");
4521 bool HasCalculatedStride =
false;
4523 SMLoc Loc = getLoc();
4526 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4531 if (Kind != NextKind) {
4532 Error(Loc,
"mismatched register size suffix");
4536 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4537 unsigned PrevRegVal =
4538 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4539 if (!HasCalculatedStride) {
4540 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4541 : (RegVal + NumRegs - PrevRegVal);
4542 HasCalculatedStride =
true;
4546 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
4547 Error(Loc,
"registers must have the same sequential stride");
4560 Error(S,
"invalid number of vectors");
4564 unsigned NumElements = 0;
4565 unsigned ElementWidth = 0;
4566 if (!
Kind.empty()) {
4568 std::tie(NumElements, ElementWidth) = *VK;
4571 Operands.push_back(AArch64Operand::CreateVectorList(
4572 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4573 getLoc(), getContext()));
4580 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(
Operands,
true);
4589 SMLoc StartLoc = getLoc();
4597 Operands.push_back(AArch64Operand::CreateReg(
4598 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4605 Error(getLoc(),
"index must be absent or #0");
4610 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4611 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4612 Error(getLoc(),
"index must be absent or #0");
4616 Operands.push_back(AArch64Operand::CreateReg(
4617 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4623 SMLoc StartLoc = getLoc();
4627 unsigned RegNum = matchRegisterNameAlias(
Name, RegKind::LookupTable);
4632 Operands.push_back(AArch64Operand::CreateReg(
4633 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4639 if (getParser().parseExpression(ImmVal))
4643 TokError(
"immediate value expected for vector index");
4649 Operands.push_back(AArch64Operand::CreateImm(
4651 getLoc(), getContext()));
4657template <
bool ParseShiftExtend, RegConstra
intEqualityTy EqTy>
4660 SMLoc StartLoc = getLoc();
4669 Operands.push_back(AArch64Operand::CreateReg(
4670 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4679 Res = tryParseOptionalShiftExtend(ExtOpnd);
4683 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.
back().get());
4684 Operands.push_back(AArch64Operand::CreateReg(
4685 RegNum, RegKind::Scalar, StartLoc,
Ext->getEndLoc(), getContext(), EqTy,
4686 Ext->getShiftExtendType(),
Ext->getShiftExtendAmount(),
4687 Ext->hasShiftExtendAmount()));
4701 if (!getTok().getString().equals_insensitive(
"mul") ||
4702 !(NextIsVL || NextIsHash))
4706 AArch64Operand::CreateToken(
"mul", getLoc(), getContext()));
4711 AArch64Operand::CreateToken(
"vl", getLoc(), getContext()));
4723 if (
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4724 Operands.push_back(AArch64Operand::CreateImm(
4731 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
4737 auto Tok = Parser.
getTok();
4742 .
Case(
"vgx2",
"vgx2")
4743 .
Case(
"vgx4",
"vgx4")
4755 auto Tok = getTok();
4765 AArch64Operand::CreateToken(Keyword, Tok.
getLoc(), getContext()));
4774 bool invertCondCode) {
4778 MatchOperandParserImpl(
Operands, Mnemonic,
true);
4792 switch (getLexer().getKind()) {
4796 if (parseSymbolicImmVal(Expr))
4797 return Error(S,
"invalid operand");
4800 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
4805 AArch64Operand::CreateToken(
"[", getLoc(), getContext()));
4810 return parseOperand(
Operands,
false,
false);
4813 if (!parseNeonVectorList(
Operands))
4817 AArch64Operand::CreateToken(
"{", getLoc(), getContext()));
4822 return parseOperand(
Operands,
false,
false);
4827 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
4829 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4834 return parseCondCode(
Operands, invertCondCode);
4842 if (!parseOptionalMulOperand(
Operands))
4853 if (Mnemonic ==
"brb" || Mnemonic ==
"smstart" || Mnemonic ==
"smstop" ||
4855 return parseKeywordOperand(
Operands);
4861 if (getParser().parseExpression(IdVal))
4864 Operands.push_back(AArch64Operand::CreateImm(IdVal, S,
E, getContext()));
4876 bool isNegative =
false;