72 SVEPredicateAsCounter,
// Forms a matrix register operand can take: the whole array, a single
// tile, or a tile row/column slice.
enum class MatrixKind { Array, Tile, Row, Col };
80enum RegConstraintEqualityTy {
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
95 static PrefixInfo CreateFromInst(
const MCInst &Inst, uint64_t TSFlags) {
98 case AArch64::MOVPRFX_ZZ:
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
110 "No destructive element size set for movprfx");
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
122 "No destructive element size set for movprfx");
133 PrefixInfo() =
default;
134 bool isActive()
const {
return Active; }
136 unsigned getElementSize()
const {
140 MCRegister getDstReg()
const {
return Dst; }
141 MCRegister getPgReg()
const {
148 bool Predicated =
false;
149 unsigned ElementSize;
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &
>(TS);
159 SMLoc getLoc()
const {
return getParser().getTok().getLoc(); }
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc,
OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc,
OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc,
OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding,
OperandVector &Operands, SMLoc S);
166 std::string &Suggestion);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
170 bool parseSymbolicImmVal(
const MCExpr *&ImmVal);
173 bool parseOptionalVGOperand(
OperandVector &Operands, StringRef &VecGroup);
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
179 bool parseRegisterInRange(
unsigned &Out,
unsigned Base,
unsigned First,
182 bool showMatchError(SMLoc Loc,
unsigned ErrCode, uint64_t ErrorInfo,
185 bool parseExprWithSpecifier(
const MCExpr *&Res, SMLoc &
E);
186 bool parseDataExpr(
const MCExpr *&Res)
override;
187 bool parseAuthExpr(
const MCExpr *&Res, SMLoc &EndLoc);
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
194 bool parseDirectiveTLSDescCall(SMLoc L);
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
206 bool parseDirectiveVariantPCS(SMLoc L);
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L,
bool Paired,
bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc,
unsigned &Opcode,
247 bool MatchingInlineAsm)
override;
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
266 template <
bool IsSVEPrefetch = false>
275 template <
bool AddFPZeroAsLiteral>
283 template <
bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
287 template <
bool ParseShiftExtend,
bool ParseSuffix>
289 template <RegKind RK>
292 tryParseSVEPredicateOrPredicateAsCounterVector(
OperandVector &Operands);
293 template <RegKind VectorKind>
295 bool ExpectMatch =
false);
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
311 bool IsWindowsArm64EC;
313 AArch64AsmParser(
const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII,
const MCTargetOptions &
Options)
315 : MCTargetAsmParser(
Options, STI, MII) {
319 MCStreamer &S = getParser().getStreamer();
321 new AArch64TargetStreamer(S);
333 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
336 bool areEqualRegs(
const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2)
const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
340 bool parseRegister(MCRegister &
Reg, SMLoc &StartLoc, SMLoc &EndLoc)
override;
341 ParseStatus tryParseRegister(MCRegister &
Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc)
override;
343 bool ParseDirective(AsmToken DirectiveID)
override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &
Op,
345 unsigned Kind)
override;
381 SMLoc StartLoc, EndLoc;
390 struct ShiftExtendOp {
393 bool HasExplicitAmount;
403 RegConstraintEqualityTy EqualityTy;
419 ShiftExtendOp ShiftExtend;
424 unsigned ElementWidth;
428 struct MatrixTileListOp {
429 unsigned RegMask = 0;
432 struct VectorListOp {
436 unsigned NumElements;
437 unsigned ElementWidth;
438 RegKind RegisterKind;
441 struct VectorIndexOp {
449 struct ShiftedImmOp {
451 unsigned ShiftAmount;
480 uint32_t PStateField;
508 struct CMHPriorityHintOp {
513 struct TIndexHintOp {
522 unsigned PStateField;
528 struct MatrixRegOp MatrixReg;
529 struct MatrixTileListOp MatrixTileList;
530 struct VectorListOp VectorList;
531 struct VectorIndexOp VectorIndex;
533 struct ShiftedImmOp ShiftedImm;
534 struct ImmRangeOp ImmRange;
536 struct FPImmOp FPImm;
538 struct SysRegOp SysReg;
539 struct SysCRImmOp SysCRImm;
541 struct PSBHintOp PSBHint;
542 struct PHintOp PHint;
543 struct BTIHintOp BTIHint;
544 struct CMHPriorityHintOp CMHPriorityHint;
545 struct TIndexHintOp TIndexHint;
546 struct ShiftExtendOp ShiftExtend;
555 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(
K), Ctx(Ctx) {}
557 AArch64Operand(
const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(
o.Ctx) {
559 StartLoc =
o.StartLoc;
569 ShiftedImm =
o.ShiftedImm;
572 ImmRange =
o.ImmRange;
586 case k_MatrixRegister:
587 MatrixReg =
o.MatrixReg;
589 case k_MatrixTileList:
590 MatrixTileList =
o.MatrixTileList;
593 VectorList =
o.VectorList;
596 VectorIndex =
o.VectorIndex;
602 SysCRImm =
o.SysCRImm;
616 case k_CMHPriorityHint:
617 CMHPriorityHint =
o.CMHPriorityHint;
620 TIndexHint =
o.TIndexHint;
623 ShiftExtend =
o.ShiftExtend;
632 SMLoc getStartLoc()
const override {
return StartLoc; }
634 SMLoc getEndLoc()
const override {
return EndLoc; }
637 assert(Kind == k_Token &&
"Invalid access!");
638 return StringRef(Tok.Data, Tok.Length);
641 bool isTokenSuffix()
const {
642 assert(Kind == k_Token &&
"Invalid access!");
646 const MCExpr *
getImm()
const {
647 assert(Kind == k_Immediate &&
"Invalid access!");
651 const MCExpr *getShiftedImmVal()
const {
652 assert(Kind == k_ShiftedImm &&
"Invalid access!");
653 return ShiftedImm.Val;
656 unsigned getShiftedImmShift()
const {
657 assert(Kind == k_ShiftedImm &&
"Invalid access!");
658 return ShiftedImm.ShiftAmount;
661 unsigned getFirstImmVal()
const {
662 assert(Kind == k_ImmRange &&
"Invalid access!");
663 return ImmRange.First;
666 unsigned getLastImmVal()
const {
667 assert(Kind == k_ImmRange &&
"Invalid access!");
668 return ImmRange.Last;
672 assert(Kind == k_CondCode &&
"Invalid access!");
677 assert (Kind == k_FPImm &&
"Invalid access!");
678 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val,
true));
681 bool getFPImmIsExact()
const {
682 assert (Kind == k_FPImm &&
"Invalid access!");
683 return FPImm.IsExact;
686 unsigned getBarrier()
const {
687 assert(Kind == k_Barrier &&
"Invalid access!");
691 StringRef getBarrierName()
const {
692 assert(Kind == k_Barrier &&
"Invalid access!");
696 bool getBarriernXSModifier()
const {
697 assert(Kind == k_Barrier &&
"Invalid access!");
701 MCRegister
getReg()
const override {
702 assert(Kind == k_Register &&
"Invalid access!");
706 MCRegister getMatrixReg()
const {
707 assert(Kind == k_MatrixRegister &&
"Invalid access!");
708 return MatrixReg.Reg;
711 unsigned getMatrixElementWidth()
const {
712 assert(Kind == k_MatrixRegister &&
"Invalid access!");
713 return MatrixReg.ElementWidth;
716 MatrixKind getMatrixKind()
const {
717 assert(Kind == k_MatrixRegister &&
"Invalid access!");
718 return MatrixReg.Kind;
721 unsigned getMatrixTileListRegMask()
const {
722 assert(isMatrixTileList() &&
"Invalid access!");
723 return MatrixTileList.RegMask;
726 RegConstraintEqualityTy getRegEqualityTy()
const {
727 assert(Kind == k_Register &&
"Invalid access!");
728 return Reg.EqualityTy;
731 MCRegister getVectorListStart()
const {
732 assert(Kind == k_VectorList &&
"Invalid access!");
733 return VectorList.Reg;
736 unsigned getVectorListCount()
const {
737 assert(Kind == k_VectorList &&
"Invalid access!");
738 return VectorList.Count;
741 unsigned getVectorListStride()
const {
742 assert(Kind == k_VectorList &&
"Invalid access!");
743 return VectorList.Stride;
746 int getVectorIndex()
const {
747 assert(Kind == k_VectorIndex &&
"Invalid access!");
748 return VectorIndex.Val;
751 StringRef getSysReg()
const {
752 assert(Kind == k_SysReg &&
"Invalid access!");
753 return StringRef(SysReg.Data, SysReg.Length);
756 unsigned getSysCR()
const {
757 assert(Kind == k_SysCR &&
"Invalid access!");
761 unsigned getPrefetch()
const {
762 assert(Kind == k_Prefetch &&
"Invalid access!");
766 unsigned getPSBHint()
const {
767 assert(Kind == k_PSBHint &&
"Invalid access!");
771 unsigned getPHint()
const {
772 assert(Kind == k_PHint &&
"Invalid access!");
776 StringRef getPSBHintName()
const {
777 assert(Kind == k_PSBHint &&
"Invalid access!");
778 return StringRef(PSBHint.Data, PSBHint.Length);
781 StringRef getPHintName()
const {
782 assert(Kind == k_PHint &&
"Invalid access!");
783 return StringRef(PHint.Data, PHint.Length);
786 unsigned getBTIHint()
const {
787 assert(Kind == k_BTIHint &&
"Invalid access!");
791 StringRef getBTIHintName()
const {
792 assert(Kind == k_BTIHint &&
"Invalid access!");
793 return StringRef(BTIHint.Data, BTIHint.Length);
796 unsigned getCMHPriorityHint()
const {
797 assert(Kind == k_CMHPriorityHint &&
"Invalid access!");
798 return CMHPriorityHint.Val;
801 StringRef getCMHPriorityHintName()
const {
802 assert(Kind == k_CMHPriorityHint &&
"Invalid access!");
803 return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
806 unsigned getTIndexHint()
const {
807 assert(Kind == k_TIndexHint &&
"Invalid access!");
808 return TIndexHint.Val;
811 StringRef getTIndexHintName()
const {
812 assert(Kind == k_TIndexHint &&
"Invalid access!");
813 return StringRef(TIndexHint.Data, TIndexHint.Length);
816 StringRef getSVCR()
const {
817 assert(Kind == k_SVCR &&
"Invalid access!");
818 return StringRef(SVCR.Data, SVCR.Length);
821 StringRef getPrefetchName()
const {
822 assert(Kind == k_Prefetch &&
"Invalid access!");
827 if (Kind == k_ShiftExtend)
828 return ShiftExtend.Type;
829 if (Kind == k_Register)
830 return Reg.ShiftExtend.Type;
834 unsigned getShiftExtendAmount()
const {
835 if (Kind == k_ShiftExtend)
836 return ShiftExtend.Amount;
837 if (Kind == k_Register)
838 return Reg.ShiftExtend.Amount;
842 bool hasShiftExtendAmount()
const {
843 if (Kind == k_ShiftExtend)
844 return ShiftExtend.HasExplicitAmount;
845 if (Kind == k_Register)
846 return Reg.ShiftExtend.HasExplicitAmount;
850 bool isImm()
const override {
return Kind == k_Immediate; }
851 bool isMem()
const override {
return false; }
853 bool isUImm6()
const {
860 return (Val >= 0 && Val < 64);
863 template <
int W
idth>
bool isSImm()
const {
864 return bool(isSImmScaled<Width, 1>());
867 template <
int Bits,
int Scale> DiagnosticPredicate isSImmScaled()
const {
868 return isImmScaled<Bits, Scale>(
true);
871 template <
int Bits,
int Scale,
int Offset = 0,
bool IsRange = false>
872 DiagnosticPredicate isUImmScaled()
const {
873 if (IsRange && isImmRange() &&
874 (getLastImmVal() != getFirstImmVal() +
Offset))
877 return isImmScaled<Bits, Scale, IsRange>(
false);
880 template <
int Bits,
int Scale,
bool IsRange = false>
881 DiagnosticPredicate isImmScaled(
bool Signed)
const {
882 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
883 (isImmRange() && !IsRange))
888 Val = getFirstImmVal();
896 int64_t MinVal, MaxVal;
898 int64_t Shift =
Bits - 1;
899 MinVal = (int64_t(1) << Shift) * -Scale;
900 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
903 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
906 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
912 DiagnosticPredicate isSVEPattern()
const {
919 if (Val >= 0 && Val < 32)
924 DiagnosticPredicate isSVEVecLenSpecifier()
const {
931 if (Val >= 0 && Val <= 1)
936 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
940 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
969 template <
int Scale>
bool isUImm12Offset()
const {
975 return isSymbolicUImm12Offset(
getImm());
978 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
981 template <
int N,
int M>
982 bool isImmInRange()
const {
989 return (Val >=
N && Val <= M);
994 template <
typename T>
995 bool isLogicalImm()
const {
1004 uint64_t
Upper = UINT64_C(-1) << (
sizeof(
T) * 4) << (
sizeof(
T) * 4);
1012 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
1014 bool isImmRange()
const {
return Kind == k_ImmRange; }
1019 template <
unsigned W
idth>
1020 std::optional<std::pair<int64_t, unsigned>> getShiftedVal()
const {
1021 if (isShiftedImm() && Width == getShiftedImmShift())
1023 return std::make_pair(
CE->getValue(), Width);
1027 int64_t Val =
CE->getValue();
1028 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
1029 return std::make_pair(Val >> Width, Width);
1031 return std::make_pair(Val, 0u);
1037 bool isAddSubImm()
const {
1038 if (!isShiftedImm() && !isImm())
1044 if (isShiftedImm()) {
1045 unsigned Shift = ShiftedImm.ShiftAmount;
1046 Expr = ShiftedImm.Val;
1047 if (Shift != 0 && Shift != 12)
1056 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1072 if (
auto ShiftedVal = getShiftedVal<12>())
1073 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1080 bool isAddSubImmNeg()
const {
1081 if (!isShiftedImm() && !isImm())
1085 if (
auto ShiftedVal = getShiftedVal<12>())
1086 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1096 template <
typename T>
1097 DiagnosticPredicate isSVECpyImm()
const {
1101 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1102 std::is_same<int8_t, T>::value;
1103 if (
auto ShiftedImm = getShiftedVal<8>())
1104 if (!(IsByte && ShiftedImm->second) &&
1106 << ShiftedImm->second))
1115 template <
typename T> DiagnosticPredicate isSVEAddSubImm()
const {
1119 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1120 std::is_same<int8_t, T>::value;
1121 if (
auto ShiftedImm = getShiftedVal<8>())
1122 if (!(IsByte && ShiftedImm->second) &&
1124 << ShiftedImm->second))
1130 template <
typename T> DiagnosticPredicate isSVEPreferredLogicalImm()
const {
1131 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1136 bool isCondCode()
const {
return Kind == k_CondCode; }
1138 bool isSIMDImmType10()
const {
1148 bool isBranchTarget()
const {
1157 assert(
N > 0 &&
"Branch target immediate cannot be 0 bits!");
1158 return (Val >= -((1<<(
N-1)) << 2) && Val <= (((1<<(
N-1))-1) << 2));
1168 if (!AArch64AsmParser::classifySymbolRef(
getImm(), ELFSpec, DarwinSpec,
1178 bool isMovWSymbolG3()
const {
1182 bool isMovWSymbolG2()
const {
1189 bool isMovWSymbolG1()
const {
1197 bool isMovWSymbolG0()
const {
1205 template<
int RegW
idth,
int Shift>
1206 bool isMOVZMovAlias()
const {
1207 if (!isImm())
return false;
1211 uint64_t
Value =
CE->getValue();
1220 template<
int RegW
idth,
int Shift>
1221 bool isMOVNMovAlias()
const {
1222 if (!isImm())
return false;
1225 if (!CE)
return false;
1226 uint64_t
Value =
CE->getValue();
1231 bool isFPImm()
const {
1232 return Kind == k_FPImm &&
1236 bool isBarrier()
const {
1237 return Kind == k_Barrier && !getBarriernXSModifier();
1239 bool isBarriernXS()
const {
1240 return Kind == k_Barrier && getBarriernXSModifier();
1242 bool isSysReg()
const {
return Kind == k_SysReg; }
1244 bool isMRSSystemRegister()
const {
1245 if (!isSysReg())
return false;
1247 return SysReg.MRSReg != -1U;
1250 bool isMSRSystemRegister()
const {
1251 if (!isSysReg())
return false;
1252 return SysReg.MSRReg != -1U;
1255 bool isSystemPStateFieldWithImm0_1()
const {
1256 if (!isSysReg())
return false;
1257 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1260 bool isSystemPStateFieldWithImm0_15()
const {
1263 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1266 bool isSVCR()
const {
1269 return SVCR.PStateField != -1U;
1272 bool isReg()
const override {
1273 return Kind == k_Register;
1276 bool isVectorList()
const {
return Kind == k_VectorList; }
1278 bool isScalarReg()
const {
1279 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar;
1282 bool isNeonVectorReg()
const {
1283 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1286 bool isNeonVectorRegLo()
const {
1287 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1288 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1290 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1294 bool isNeonVectorReg0to7()
const {
1295 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1296 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1300 bool isMatrix()
const {
return Kind == k_MatrixRegister; }
1301 bool isMatrixTileList()
const {
return Kind == k_MatrixTileList; }
1303 template <
unsigned Class>
bool isSVEPredicateAsCounterReg()
const {
1306 case AArch64::PPRRegClassID:
1307 case AArch64::PPR_3bRegClassID:
1308 case AArch64::PPR_p8to15RegClassID:
1309 case AArch64::PNRRegClassID:
1310 case AArch64::PNR_p8to15RegClassID:
1311 case AArch64::PPRorPNRRegClassID:
1312 RK = RegKind::SVEPredicateAsCounter;
1318 return (Kind == k_Register &&
Reg.Kind == RK) &&
1319 AArch64MCRegisterClasses[
Class].contains(
getReg());
1322 template <
unsigned Class>
bool isSVEVectorReg()
const {
1325 case AArch64::ZPRRegClassID:
1326 case AArch64::ZPR_3bRegClassID:
1327 case AArch64::ZPR_4bRegClassID:
1328 case AArch64::ZPRMul2_LoRegClassID:
1329 case AArch64::ZPRMul2_HiRegClassID:
1330 case AArch64::ZPR_KRegClassID:
1331 RK = RegKind::SVEDataVector;
1333 case AArch64::PPRRegClassID:
1334 case AArch64::PPR_3bRegClassID:
1335 case AArch64::PPR_p8to15RegClassID:
1336 case AArch64::PNRRegClassID:
1337 case AArch64::PNR_p8to15RegClassID:
1338 case AArch64::PPRorPNRRegClassID:
1339 RK = RegKind::SVEPredicateVector;
1345 return (Kind == k_Register &&
Reg.Kind == RK) &&
1346 AArch64MCRegisterClasses[
Class].contains(
getReg());
1349 template <
unsigned Class>
bool isFPRasZPR()
const {
1350 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1351 AArch64MCRegisterClasses[
Class].contains(
getReg());
1354 template <
int ElementW
idth,
unsigned Class>
1355 DiagnosticPredicate isSVEPredicateVectorRegOfWidth()
const {
1356 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1359 if (isSVEVectorReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1365 template <
int ElementW
idth,
unsigned Class>
1366 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth()
const {
1367 if (Kind != k_Register || (
Reg.Kind != RegKind::SVEPredicateAsCounter &&
1368 Reg.Kind != RegKind::SVEPredicateVector))
1371 if ((isSVEPredicateAsCounterReg<Class>() ||
1372 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1373 Reg.ElementWidth == ElementWidth)
1379 template <
int ElementW
idth,
unsigned Class>
1380 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth()
const {
1381 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateAsCounter)
1384 if (isSVEPredicateAsCounterReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1390 template <
int ElementW
idth,
unsigned Class>
1391 DiagnosticPredicate isSVEDataVectorRegOfWidth()
const {
1392 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1395 if (isSVEVectorReg<Class>() &&
Reg.ElementWidth == ElementWidth)
1401 template <
int ElementWidth,
unsigned Class,
1403 bool ShiftWidthAlwaysSame>
1404 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend()
const {
1405 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1406 if (!VectorMatch.isMatch())
1412 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1415 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1418 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1424 bool isGPR32as64()
const {
1425 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1426 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.Reg);
1429 bool isGPR64as32()
const {
1430 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1431 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.Reg);
1434 bool isGPR64x8()
const {
1435 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1436 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1440 bool isWSeqPair()
const {
1441 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1442 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1446 bool isXSeqPair()
const {
1447 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1448 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1452 bool isSyspXzrPair()
const {
1456 template<
int64_t Angle,
int64_t Remainder>
1457 DiagnosticPredicate isComplexRotation()
const {
1464 uint64_t
Value =
CE->getValue();
1466 if (
Value % Angle == Remainder &&
Value <= 270)
1471 template <
unsigned RegClassID>
bool isGPR64()
const {
1472 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1473 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
1476 template <
unsigned RegClassID,
int ExtW
idth>
1477 DiagnosticPredicate isGPR64WithShiftExtend()
const {
1478 if (Kind != k_Register ||
Reg.Kind != RegKind::Scalar)
1482 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1489 template <RegKind VectorKind,
unsigned NumRegs,
bool IsConsecutive = false>
1490 bool isImplicitlyTypedVectorList()
const {
1491 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1492 VectorList.NumElements == 0 &&
1493 VectorList.RegisterKind == VectorKind &&
1494 (!IsConsecutive || (VectorList.Stride == 1));
1497 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1498 unsigned ElementWidth,
unsigned Stride = 1>
1499 bool isTypedVectorList()
const {
1500 if (Kind != k_VectorList)
1502 if (VectorList.Count != NumRegs)
1504 if (VectorList.RegisterKind != VectorKind)
1506 if (VectorList.ElementWidth != ElementWidth)
1508 if (VectorList.Stride != Stride)
1510 return VectorList.NumElements == NumElements;
1513 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1514 unsigned ElementWidth,
unsigned RegClass>
1515 DiagnosticPredicate isTypedVectorListMultiple()
const {
1517 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1520 if (!AArch64MCRegisterClasses[RegClass].
contains(VectorList.Reg))
1525 template <RegKind VectorKind,
unsigned NumRegs,
unsigned Stride,
1526 unsigned ElementWidth>
1527 DiagnosticPredicate isTypedVectorListStrided()
const {
1528 bool Res = isTypedVectorList<VectorKind, NumRegs, 0,
1529 ElementWidth, Stride>();
1532 if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
1533 ((VectorList.Reg >= AArch64::Z16) &&
1534 (VectorList.Reg < (AArch64::Z16 + Stride))))
1539 template <
int Min,
int Max>
1540 DiagnosticPredicate isVectorIndex()
const {
1541 if (Kind != k_VectorIndex)
1543 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1548 bool isToken()
const override {
return Kind == k_Token; }
1550 bool isTokenEqual(StringRef Str)
const {
1551 return Kind == k_Token &&
getToken() == Str;
1553 bool isSysCR()
const {
return Kind == k_SysCR; }
1554 bool isPrefetch()
const {
return Kind == k_Prefetch; }
1555 bool isPSBHint()
const {
return Kind == k_PSBHint; }
1556 bool isPHint()
const {
return Kind == k_PHint; }
1557 bool isBTIHint()
const {
return Kind == k_BTIHint; }
1558 bool isCMHPriorityHint()
const {
return Kind == k_CMHPriorityHint; }
1559 bool isTIndexHint()
const {
return Kind == k_TIndexHint; }
1560 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1561 bool isShifter()
const {
1562 if (!isShiftExtend())
1571 template <
unsigned ImmEnum> DiagnosticPredicate isExactFPImm()
const {
1572 if (Kind != k_FPImm)
1575 if (getFPImmIsExact()) {
1577 auto *
Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1581 APFloat RealVal(APFloat::IEEEdouble());
1583 RealVal.convertFromString(
Desc->Repr, APFloat::rmTowardZero);
1584 if (
errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1587 if (
getFPImm().bitwiseIsEqual(RealVal))
1594 template <
unsigned ImmA,
unsigned ImmB>
1595 DiagnosticPredicate isExactFPImm()
const {
1597 if ((Res = isExactFPImm<ImmA>()))
1599 if ((Res = isExactFPImm<ImmB>()))
1604 bool isExtend()
const {
1605 if (!isShiftExtend())
1614 getShiftExtendAmount() <= 4;
1617 bool isExtend64()
const {
1627 bool isExtendLSL64()
const {
1633 getShiftExtendAmount() <= 4;
1636 bool isLSLImm3Shift()
const {
1637 if (!isShiftExtend())
1643 template<
int W
idth>
bool isMemXExtend()
const {
1648 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1649 getShiftExtendAmount() == 0);
1652 template<
int W
idth>
bool isMemWExtend()
const {
1657 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1658 getShiftExtendAmount() == 0);
1661 template <
unsigned w
idth>
1662 bool isArithmeticShifter()
const {
1672 template <
unsigned w
idth>
1673 bool isLogicalShifter()
const {
1681 getShiftExtendAmount() < width;
1684 bool isMovImm32Shifter()
const {
1692 uint64_t Val = getShiftExtendAmount();
1693 return (Val == 0 || Val == 16);
1696 bool isMovImm64Shifter()
const {
1704 uint64_t Val = getShiftExtendAmount();
1705 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1708 bool isLogicalVecShifter()
const {
1713 unsigned Shift = getShiftExtendAmount();
1715 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1718 bool isLogicalVecHalfWordShifter()
const {
1719 if (!isLogicalVecShifter())
1723 unsigned Shift = getShiftExtendAmount();
1725 (Shift == 0 || Shift == 8);
1728 bool isMoveVecShifter()
const {
1729 if (!isShiftExtend())
1733 unsigned Shift = getShiftExtendAmount();
1735 (Shift == 8 || Shift == 16);
1744 bool isSImm9OffsetFB()
const {
1745 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1748 bool isAdrpLabel()
const {
1755 int64_t Val =
CE->getValue();
1756 int64_t Min = - (4096 * (1LL << (21 - 1)));
1757 int64_t
Max = 4096 * ((1LL << (21 - 1)) - 1);
1758 return (Val % 4096) == 0 && Val >= Min && Val <=
Max;
1764 bool isAdrLabel()
const {
1771 int64_t Val =
CE->getValue();
1772 int64_t Min = - (1LL << (21 - 1));
1773 int64_t
Max = ((1LL << (21 - 1)) - 1);
1774 return Val >= Min && Val <=
Max;
1780 template <MatrixKind Kind,
unsigned EltSize,
unsigned RegClass>
1781 DiagnosticPredicate isMatrixRegOperand()
const {
1784 if (getMatrixKind() != Kind ||
1785 !AArch64MCRegisterClasses[RegClass].
contains(getMatrixReg()) ||
1786 EltSize != getMatrixElementWidth())
1791 bool isPAuthPCRelLabel16Operand()
const {
1803 return (Val <= 0) && (Val > -(1 << 18));
1806 void addExpr(MCInst &Inst,
const MCExpr *Expr)
const {
1816 void addRegOperands(MCInst &Inst,
unsigned N)
const {
1817 assert(
N == 1 &&
"Invalid number of operands!");
1821 void addMatrixOperands(MCInst &Inst,
unsigned N)
const {
1822 assert(
N == 1 &&
"Invalid number of operands!");
1826 void addGPR32as64Operands(MCInst &Inst,
unsigned N)
const {
1827 assert(
N == 1 &&
"Invalid number of operands!");
1829 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1831 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1838 void addGPR64as32Operands(MCInst &Inst,
unsigned N)
const {
1839 assert(
N == 1 &&
"Invalid number of operands!");
1841 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
1843 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1850 template <
int W
idth>
1851 void addFPRasZPRRegOperands(MCInst &Inst,
unsigned N)
const {
1854 case 8:
Base = AArch64::B0;
break;
1855 case 16:
Base = AArch64::H0;
break;
1856 case 32:
Base = AArch64::S0;
break;
1857 case 64:
Base = AArch64::D0;
break;
1858 case 128:
Base = AArch64::Q0;
break;
1865 void addPPRorPNRRegOperands(MCInst &Inst,
unsigned N)
const {
1866 assert(
N == 1 &&
"Invalid number of operands!");
1869 if (
Reg >= AArch64::PN0 &&
Reg <= AArch64::PN15)
1870 Reg =
Reg - AArch64::PN0 + AArch64::P0;
1874 void addPNRasPPRRegOperands(MCInst &Inst,
unsigned N)
const {
1875 assert(
N == 1 &&
"Invalid number of operands!");
1880 void addVectorReg64Operands(MCInst &Inst,
unsigned N)
const {
1881 assert(
N == 1 &&
"Invalid number of operands!");
1883 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1887 void addVectorReg128Operands(MCInst &Inst,
unsigned N)
const {
1888 assert(
N == 1 &&
"Invalid number of operands!");
1890 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1894 void addVectorRegLoOperands(MCInst &Inst,
unsigned N)
const {
1895 assert(
N == 1 &&
"Invalid number of operands!");
1899 void addVectorReg0to7Operands(MCInst &Inst,
unsigned N)
const {
1900 assert(
N == 1 &&
"Invalid number of operands!");
1904 enum VecListIndexType {
1905 VecListIdx_DReg = 0,
1906 VecListIdx_QReg = 1,
1907 VecListIdx_ZReg = 2,
1908 VecListIdx_PReg = 3,
1911 template <VecListIndexType RegTy,
unsigned NumRegs,
1912 bool IsConsecutive =
false>
1913 void addVectorListOperands(MCInst &Inst,
unsigned N)
const {
1914 assert(
N == 1 &&
"Invalid number of operands!");
1915 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1916 "Expected consecutive registers");
1917 static const unsigned FirstRegs[][5] = {
1919 AArch64::D0, AArch64::D0_D1,
1920 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1922 AArch64::Q0, AArch64::Q0_Q1,
1923 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1925 AArch64::Z0, AArch64::Z0_Z1,
1926 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1928 AArch64::P0, AArch64::P0_P1 }
1931 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1932 " NumRegs must be <= 4 for ZRegs");
1934 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1935 " NumRegs must be <= 2 for PRegs");
1937 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1939 FirstRegs[(
unsigned)RegTy][0]));
1942 template <
unsigned NumRegs>
1943 void addStridedVectorListOperands(MCInst &Inst,
unsigned N)
const {
1944 assert(
N == 1 &&
"Invalid number of operands!");
1945 assert((NumRegs == 2 || NumRegs == 4) &&
" NumRegs must be 2 or 4");
1949 if (getVectorListStart() < AArch64::Z16) {
1950 assert((getVectorListStart() < AArch64::Z8) &&
1951 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1953 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1955 assert((getVectorListStart() < AArch64::Z24) &&
1956 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1958 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1962 if (getVectorListStart() < AArch64::Z16) {
1963 assert((getVectorListStart() < AArch64::Z4) &&
1964 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1966 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1968 assert((getVectorListStart() < AArch64::Z20) &&
1969 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1971 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1979 void addMatrixTileListOperands(MCInst &Inst,
unsigned N)
const {
1980 assert(
N == 1 &&
"Invalid number of operands!");
1981 unsigned RegMask = getMatrixTileListRegMask();
1982 assert(RegMask <= 0xFF &&
"Invalid mask!");
1986 void addVectorIndexOperands(MCInst &Inst,
unsigned N)
const {
1987 assert(
N == 1 &&
"Invalid number of operands!");
1991 template <
unsigned ImmIs0,
unsigned ImmIs1>
1992 void addExactFPImmOperands(MCInst &Inst,
unsigned N)
const {
1993 assert(
N == 1 &&
"Invalid number of operands!");
1994 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
1998 void addImmOperands(MCInst &Inst,
unsigned N)
const {
1999 assert(
N == 1 &&
"Invalid number of operands!");
2006 template <
int Shift>
2007 void addImmWithOptionalShiftOperands(MCInst &Inst,
unsigned N)
const {
2008 assert(
N == 2 &&
"Invalid number of operands!");
2009 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
2012 }
else if (isShiftedImm()) {
2013 addExpr(Inst, getShiftedImmVal());
2021 template <
int Shift>
2022 void addImmNegWithOptionalShiftOperands(MCInst &Inst,
unsigned N)
const {
2023 assert(
N == 2 &&
"Invalid number of operands!");
2024 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
2031 void addCondCodeOperands(MCInst &Inst,
unsigned N)
const {
2032 assert(
N == 1 &&
"Invalid number of operands!");
2036 void addAdrpLabelOperands(MCInst &Inst,
unsigned N)
const {
2037 assert(
N == 1 &&
"Invalid number of operands!");
2045 void addAdrLabelOperands(MCInst &Inst,
unsigned N)
const {
2046 addImmOperands(Inst,
N);
2050 void addUImm12OffsetOperands(MCInst &Inst,
unsigned N)
const {
2051 assert(
N == 1 &&
"Invalid number of operands!");
2061 void addUImm6Operands(MCInst &Inst,
unsigned N)
const {
2062 assert(
N == 1 &&
"Invalid number of operands!");
2067 template <
int Scale>
2068 void addImmScaledOperands(MCInst &Inst,
unsigned N)
const {
2069 assert(
N == 1 &&
"Invalid number of operands!");
2074 template <
int Scale>
2075 void addImmScaledRangeOperands(MCInst &Inst,
unsigned N)
const {
2076 assert(
N == 1 &&
"Invalid number of operands!");
2080 template <
typename T>
2081 void addLogicalImmOperands(MCInst &Inst,
unsigned N)
const {
2082 assert(
N == 1 &&
"Invalid number of operands!");
2084 std::make_unsigned_t<T> Val = MCE->
getValue();
2089 template <
typename T>
2090 void addLogicalImmNotOperands(MCInst &Inst,
unsigned N)
const {
2091 assert(
N == 1 &&
"Invalid number of operands!");
2093 std::make_unsigned_t<T> Val = ~MCE->getValue();
2098 void addSIMDImmType10Operands(MCInst &Inst,
unsigned N)
const {
2099 assert(
N == 1 &&
"Invalid number of operands!");
2105 void addBranchTarget26Operands(MCInst &Inst,
unsigned N)
const {
2109 assert(
N == 1 &&
"Invalid number of operands!");
2115 assert(MCE &&
"Invalid constant immediate operand!");
2119 void addPAuthPCRelLabel16Operands(MCInst &Inst,
unsigned N)
const {
2123 assert(
N == 1 &&
"Invalid number of operands!");
2132 void addPCRelLabel19Operands(MCInst &Inst,
unsigned N)
const {
2136 assert(
N == 1 &&
"Invalid number of operands!");
2142 assert(MCE &&
"Invalid constant immediate operand!");
2146 void addPCRelLabel9Operands(MCInst &Inst,
unsigned N)
const {
2150 assert(
N == 1 &&
"Invalid number of operands!");
2156 assert(MCE &&
"Invalid constant immediate operand!");
2160 void addBranchTarget14Operands(MCInst &Inst,
unsigned N)
const {
2164 assert(
N == 1 &&
"Invalid number of operands!");
2170 assert(MCE &&
"Invalid constant immediate operand!");
2174 void addFPImmOperands(MCInst &Inst,
unsigned N)
const {
2175 assert(
N == 1 &&
"Invalid number of operands!");
2180 void addBarrierOperands(MCInst &Inst,
unsigned N)
const {
2181 assert(
N == 1 &&
"Invalid number of operands!");
2185 void addBarriernXSOperands(MCInst &Inst,
unsigned N)
const {
2186 assert(
N == 1 &&
"Invalid number of operands!");
2190 void addMRSSystemRegisterOperands(MCInst &Inst,
unsigned N)
const {
2191 assert(
N == 1 &&
"Invalid number of operands!");
2196 void addMSRSystemRegisterOperands(MCInst &Inst,
unsigned N)
const {
2197 assert(
N == 1 &&
"Invalid number of operands!");
2202 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst,
unsigned N)
const {
2203 assert(
N == 1 &&
"Invalid number of operands!");
2208 void addSVCROperands(MCInst &Inst,
unsigned N)
const {
2209 assert(
N == 1 &&
"Invalid number of operands!");
2214 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst,
unsigned N)
const {
2215 assert(
N == 1 &&
"Invalid number of operands!");
2220 void addSysCROperands(MCInst &Inst,
unsigned N)
const {
2221 assert(
N == 1 &&
"Invalid number of operands!");
2225 void addPrefetchOperands(MCInst &Inst,
unsigned N)
const {
2226 assert(
N == 1 &&
"Invalid number of operands!");
2230 void addPSBHintOperands(MCInst &Inst,
unsigned N)
const {
2231 assert(
N == 1 &&
"Invalid number of operands!");
2235 void addPHintOperands(MCInst &Inst,
unsigned N)
const {
2236 assert(
N == 1 &&
"Invalid number of operands!");
2240 void addBTIHintOperands(MCInst &Inst,
unsigned N)
const {
2241 assert(
N == 1 &&
"Invalid number of operands!");
2245 void addCMHPriorityHintOperands(MCInst &Inst,
unsigned N)
const {
2246 assert(
N == 1 &&
"Invalid number of operands!");
2250 void addTIndexHintOperands(MCInst &Inst,
unsigned N)
const {
2251 assert(
N == 1 &&
"Invalid number of operands!");
2255 void addShifterOperands(MCInst &Inst,
unsigned N)
const {
2256 assert(
N == 1 &&
"Invalid number of operands!");
2262 void addLSLImm3ShifterOperands(MCInst &Inst,
unsigned N)
const {
2263 assert(
N == 1 &&
"Invalid number of operands!");
2264 unsigned Imm = getShiftExtendAmount();
2268 void addSyspXzrPairOperand(MCInst &Inst,
unsigned N)
const {
2269 assert(
N == 1 &&
"Invalid number of operands!");
2274 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2277 if (
Reg != AArch64::XZR)
2283 void addExtendOperands(MCInst &Inst,
unsigned N)
const {
2284 assert(
N == 1 &&
"Invalid number of operands!");
2291 void addExtend64Operands(MCInst &Inst,
unsigned N)
const {
2292 assert(
N == 1 &&
"Invalid number of operands!");
2299 void addMemExtendOperands(MCInst &Inst,
unsigned N)
const {
2300 assert(
N == 2 &&
"Invalid number of operands!");
2311 void addMemExtend8Operands(MCInst &Inst,
unsigned N)
const {
2312 assert(
N == 2 &&
"Invalid number of operands!");
2320 void addMOVZMovAliasOperands(MCInst &Inst,
unsigned N)
const {
2321 assert(
N == 1 &&
"Invalid number of operands!");
2325 uint64_t
Value =
CE->getValue();
2333 void addMOVNMovAliasOperands(MCInst &Inst,
unsigned N)
const {
2334 assert(
N == 1 &&
"Invalid number of operands!");
2337 uint64_t
Value =
CE->getValue();
2341 void addComplexRotationEvenOperands(MCInst &Inst,
unsigned N)
const {
2342 assert(
N == 1 &&
"Invalid number of operands!");
2347 void addComplexRotationOddOperands(MCInst &Inst,
unsigned N)
const {
2348 assert(
N == 1 &&
"Invalid number of operands!");
2353 void print(raw_ostream &OS,
const MCAsmInfo &MAI)
const override;
2355 static std::unique_ptr<AArch64Operand>
2356 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx,
bool IsSuffix =
false) {
2357 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2358 Op->Tok.Data = Str.data();
2359 Op->Tok.Length = Str.size();
2360 Op->Tok.IsSuffix = IsSuffix;
2366 static std::unique_ptr<AArch64Operand>
2367 CreateReg(MCRegister
Reg, RegKind Kind, SMLoc S, SMLoc
E, MCContext &Ctx,
2368 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2370 unsigned ShiftAmount = 0,
unsigned HasExplicitAmount =
false) {
2371 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2373 Op->Reg.Kind = Kind;
2374 Op->Reg.ElementWidth = 0;
2375 Op->Reg.EqualityTy = EqTy;
2376 Op->Reg.ShiftExtend.Type = ExtTy;
2377 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2378 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2384 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2385 MCRegister
Reg, RegKind Kind,
unsigned ElementWidth, SMLoc S, SMLoc
E,
2387 unsigned ShiftAmount = 0,
unsigned HasExplicitAmount =
false) {
2388 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2389 Kind == RegKind::SVEPredicateVector ||
2390 Kind == RegKind::SVEPredicateAsCounter) &&
2391 "Invalid vector kind");
2392 auto Op = CreateReg(
Reg, Kind, S,
E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2394 Op->Reg.ElementWidth = ElementWidth;
2398 static std::unique_ptr<AArch64Operand>
2399 CreateVectorList(MCRegister
Reg,
unsigned Count,
unsigned Stride,
2400 unsigned NumElements,
unsigned ElementWidth,
2401 RegKind RegisterKind, SMLoc S, SMLoc
E, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2403 Op->VectorList.Reg =
Reg;
2405 Op->VectorList.Stride = Stride;
2406 Op->VectorList.NumElements = NumElements;
2407 Op->VectorList.ElementWidth = ElementWidth;
2408 Op->VectorList.RegisterKind = RegisterKind;
2414 static std::unique_ptr<AArch64Operand>
2415 CreateVectorIndex(
int Idx, SMLoc S, SMLoc
E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2417 Op->VectorIndex.Val = Idx;
2423 static std::unique_ptr<AArch64Operand>
2424 CreateMatrixTileList(
unsigned RegMask, SMLoc S, SMLoc
E, MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2426 Op->MatrixTileList.RegMask = RegMask;
2432 static void ComputeRegsForAlias(
unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2433 const unsigned ElementWidth) {
2434 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2436 {{0, AArch64::ZAB0},
2437 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2438 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2439 {{8, AArch64::ZAB0},
2440 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2441 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2442 {{16, AArch64::ZAH0},
2443 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2444 {{16, AArch64::ZAH1},
2445 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2446 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2447 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2448 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2449 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2452 if (ElementWidth == 64)
2455 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth,
Reg)];
2456 assert(!Regs.empty() &&
"Invalid tile or element width!");
2461 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val, SMLoc S,
2462 SMLoc
E, MCContext &Ctx) {
2463 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
2471 unsigned ShiftAmount,
2474 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(
unsigned First,
2483 unsigned Last, SMLoc S,
2486 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2488 Op->ImmRange.Last =
Last;
2493 static std::unique_ptr<AArch64Operand>
2495 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2496 Op->CondCode.Code =
Code;
2502 static std::unique_ptr<AArch64Operand>
2503 CreateFPImm(APFloat Val,
bool IsExact, SMLoc S, MCContext &Ctx) {
2504 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2506 Op->FPImm.IsExact = IsExact;
2512 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
2516 bool HasnXSModifier) {
2517 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2518 Op->Barrier.Val = Val;
2519 Op->Barrier.Data = Str.data();
2520 Op->Barrier.Length = Str.size();
2521 Op->Barrier.HasnXSModifier = HasnXSModifier;
2527 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2530 uint32_t PStateField,
2532 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2533 Op->SysReg.Data = Str.data();
2534 Op->SysReg.Length = Str.size();
2535 Op->SysReg.MRSReg = MRSReg;
2536 Op->SysReg.MSRReg = MSRReg;
2537 Op->SysReg.PStateField = PStateField;
2543 static std::unique_ptr<AArch64Operand>
2544 CreatePHintInst(
unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2545 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2546 Op->PHint.Val = Val;
2547 Op->PHint.Data = Str.data();
2548 Op->PHint.Length = Str.size();
2554 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val, SMLoc S,
2555 SMLoc
E, MCContext &Ctx) {
2556 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2557 Op->SysCRImm.Val = Val;
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
2567 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2576 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
2580 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2581 Op->PSBHint.Val = Val;
2582 Op->PSBHint.Data = Str.data();
2583 Op->PSBHint.Length = Str.size();
2589 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
2593 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2594 Op->BTIHint.Val = Val | 32;
2595 Op->BTIHint.Data = Str.data();
2596 Op->BTIHint.Length = Str.size();
2602 static std::unique_ptr<AArch64Operand>
2603 CreateCMHPriorityHint(
unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2604 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2605 Op->CMHPriorityHint.Val = Val;
2606 Op->CMHPriorityHint.Data = Str.data();
2607 Op->CMHPriorityHint.Length = Str.size();
2613 static std::unique_ptr<AArch64Operand>
2614 CreateTIndexHint(
unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2615 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2616 Op->TIndexHint.Val = Val;
2617 Op->TIndexHint.Data = Str.data();
2618 Op->TIndexHint.Length = Str.size();
2624 static std::unique_ptr<AArch64Operand>
2625 CreateMatrixRegister(MCRegister
Reg,
unsigned ElementWidth, MatrixKind Kind,
2626 SMLoc S, SMLoc
E, MCContext &Ctx) {
2627 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2628 Op->MatrixReg.Reg =
Reg;
2629 Op->MatrixReg.ElementWidth = ElementWidth;
2630 Op->MatrixReg.Kind = Kind;
2636 static std::unique_ptr<AArch64Operand>
2637 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2638 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2639 Op->SVCR.PStateField = PStateField;
2640 Op->SVCR.Data = Str.data();
2641 Op->SVCR.Length = Str.size();
2647 static std::unique_ptr<AArch64Operand>
2649 bool HasExplicitAmount, SMLoc S, SMLoc
E, MCContext &Ctx) {
2650 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2651 Op->ShiftExtend.Type = ShOp;
2652 Op->ShiftExtend.Amount = Val;
2653 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2665 OS <<
"<fpimm " <<
getFPImm().bitcastToAPInt().getZExtValue();
2666 if (!getFPImmIsExact())
2671 StringRef
Name = getBarrierName();
2673 OS <<
"<barrier " <<
Name <<
">";
2675 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2681 case k_ShiftedImm: {
2682 unsigned Shift = getShiftedImmShift();
2683 OS <<
"<shiftedimm ";
2690 OS << getFirstImmVal();
2691 OS <<
":" << getLastImmVal() <<
">";
2697 case k_VectorList: {
2698 OS <<
"<vectorlist ";
2699 MCRegister
Reg = getVectorListStart();
2700 for (
unsigned i = 0, e = getVectorListCount(); i !=
e; ++i)
2701 OS <<
Reg.
id() + i * getVectorListStride() <<
" ";
2706 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2709 OS <<
"<sysreg: " << getSysReg() <<
'>';
2715 OS <<
"c" << getSysCR();
2718 StringRef
Name = getPrefetchName();
2720 OS <<
"<prfop " <<
Name <<
">";
2722 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2726 OS << getPSBHintName();
2729 OS << getPHintName();
2732 OS << getBTIHintName();
2734 case k_CMHPriorityHint:
2735 OS << getCMHPriorityHintName();
2738 OS << getTIndexHintName();
2740 case k_MatrixRegister:
2741 OS <<
"<matrix " << getMatrixReg().id() <<
">";
2743 case k_MatrixTileList: {
2744 OS <<
"<matrixlist ";
2745 unsigned RegMask = getMatrixTileListRegMask();
2746 unsigned MaxBits = 8;
2747 for (
unsigned I = MaxBits;
I > 0; --
I)
2748 OS << ((RegMask & (1 << (
I - 1))) >> (
I - 1));
2757 OS <<
"<register " <<
getReg().
id() <<
">";
2758 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2763 << getShiftExtendAmount();
2764 if (!hasShiftExtendAmount())
2780 .
Case(
"v0", AArch64::Q0)
2781 .
Case(
"v1", AArch64::Q1)
2782 .
Case(
"v2", AArch64::Q2)
2783 .
Case(
"v3", AArch64::Q3)
2784 .
Case(
"v4", AArch64::Q4)
2785 .
Case(
"v5", AArch64::Q5)
2786 .
Case(
"v6", AArch64::Q6)
2787 .
Case(
"v7", AArch64::Q7)
2788 .
Case(
"v8", AArch64::Q8)
2789 .
Case(
"v9", AArch64::Q9)
2790 .
Case(
"v10", AArch64::Q10)
2791 .
Case(
"v11", AArch64::Q11)
2792 .
Case(
"v12", AArch64::Q12)
2793 .
Case(
"v13", AArch64::Q13)
2794 .
Case(
"v14", AArch64::Q14)
2795 .
Case(
"v15", AArch64::Q15)
2796 .
Case(
"v16", AArch64::Q16)
2797 .
Case(
"v17", AArch64::Q17)
2798 .
Case(
"v18", AArch64::Q18)
2799 .
Case(
"v19", AArch64::Q19)
2800 .
Case(
"v20", AArch64::Q20)
2801 .
Case(
"v21", AArch64::Q21)
2802 .
Case(
"v22", AArch64::Q22)
2803 .
Case(
"v23", AArch64::Q23)
2804 .
Case(
"v24", AArch64::Q24)
2805 .
Case(
"v25", AArch64::Q25)
2806 .
Case(
"v26", AArch64::Q26)
2807 .
Case(
"v27", AArch64::Q27)
2808 .
Case(
"v28", AArch64::Q28)
2809 .
Case(
"v29", AArch64::Q29)
2810 .
Case(
"v30", AArch64::Q30)
2811 .
Case(
"v31", AArch64::Q31)
2820 RegKind VectorKind) {
2821 std::pair<int, int> Res = {-1, -1};
2823 switch (VectorKind) {
2824 case RegKind::NeonVector:
2827 .Case(
".1d", {1, 64})
2828 .Case(
".1q", {1, 128})
2830 .Case(
".2h", {2, 16})
2831 .Case(
".2b", {2, 8})
2832 .Case(
".2s", {2, 32})
2833 .Case(
".2d", {2, 64})
2836 .Case(
".4b", {4, 8})
2837 .Case(
".4h", {4, 16})
2838 .Case(
".4s", {4, 32})
2839 .Case(
".8b", {8, 8})
2840 .Case(
".8h", {8, 16})
2841 .Case(
".16b", {16, 8})
2846 .Case(
".h", {0, 16})
2847 .Case(
".s", {0, 32})
2848 .Case(
".d", {0, 64})
2851 case RegKind::SVEPredicateAsCounter:
2852 case RegKind::SVEPredicateVector:
2853 case RegKind::SVEDataVector:
2854 case RegKind::Matrix:
2858 .Case(
".h", {0, 16})
2859 .Case(
".s", {0, 32})
2860 .Case(
".d", {0, 64})
2861 .Case(
".q", {0, 128})
2868 if (Res == std::make_pair(-1, -1))
2869 return std::nullopt;
2871 return std::optional<std::pair<int, int>>(Res);
2880 .
Case(
"z0", AArch64::Z0)
2881 .
Case(
"z1", AArch64::Z1)
2882 .
Case(
"z2", AArch64::Z2)
2883 .
Case(
"z3", AArch64::Z3)
2884 .
Case(
"z4", AArch64::Z4)
2885 .
Case(
"z5", AArch64::Z5)
2886 .
Case(
"z6", AArch64::Z6)
2887 .
Case(
"z7", AArch64::Z7)
2888 .
Case(
"z8", AArch64::Z8)
2889 .
Case(
"z9", AArch64::Z9)
2890 .
Case(
"z10", AArch64::Z10)
2891 .
Case(
"z11", AArch64::Z11)
2892 .
Case(
"z12", AArch64::Z12)
2893 .
Case(
"z13", AArch64::Z13)
2894 .
Case(
"z14", AArch64::Z14)
2895 .
Case(
"z15", AArch64::Z15)
2896 .
Case(
"z16", AArch64::Z16)
2897 .
Case(
"z17", AArch64::Z17)
2898 .
Case(
"z18", AArch64::Z18)
2899 .
Case(
"z19", AArch64::Z19)
2900 .
Case(
"z20", AArch64::Z20)
2901 .
Case(
"z21", AArch64::Z21)
2902 .
Case(
"z22", AArch64::Z22)
2903 .
Case(
"z23", AArch64::Z23)
2904 .
Case(
"z24", AArch64::Z24)
2905 .
Case(
"z25", AArch64::Z25)
2906 .
Case(
"z26", AArch64::Z26)
2907 .
Case(
"z27", AArch64::Z27)
2908 .
Case(
"z28", AArch64::Z28)
2909 .
Case(
"z29", AArch64::Z29)
2910 .
Case(
"z30", AArch64::Z30)
2911 .
Case(
"z31", AArch64::Z31)
2917 .
Case(
"p0", AArch64::P0)
2918 .
Case(
"p1", AArch64::P1)
2919 .
Case(
"p2", AArch64::P2)
2920 .
Case(
"p3", AArch64::P3)
2921 .
Case(
"p4", AArch64::P4)
2922 .
Case(
"p5", AArch64::P5)
2923 .
Case(
"p6", AArch64::P6)
2924 .
Case(
"p7", AArch64::P7)
2925 .
Case(
"p8", AArch64::P8)
2926 .
Case(
"p9", AArch64::P9)
2927 .
Case(
"p10", AArch64::P10)
2928 .
Case(
"p11", AArch64::P11)
2929 .
Case(
"p12", AArch64::P12)
2930 .
Case(
"p13", AArch64::P13)
2931 .
Case(
"p14", AArch64::P14)
2932 .
Case(
"p15", AArch64::P15)
2938 .
Case(
"pn0", AArch64::PN0)
2939 .
Case(
"pn1", AArch64::PN1)
2940 .
Case(
"pn2", AArch64::PN2)
2941 .
Case(
"pn3", AArch64::PN3)
2942 .
Case(
"pn4", AArch64::PN4)
2943 .
Case(
"pn5", AArch64::PN5)
2944 .
Case(
"pn6", AArch64::PN6)
2945 .
Case(
"pn7", AArch64::PN7)
2946 .
Case(
"pn8", AArch64::PN8)
2947 .
Case(
"pn9", AArch64::PN9)
2948 .
Case(
"pn10", AArch64::PN10)
2949 .
Case(
"pn11", AArch64::PN11)
2950 .
Case(
"pn12", AArch64::PN12)
2951 .
Case(
"pn13", AArch64::PN13)
2952 .
Case(
"pn14", AArch64::PN14)
2953 .
Case(
"pn15", AArch64::PN15)
2959 .
Case(
"za0.d", AArch64::ZAD0)
2960 .
Case(
"za1.d", AArch64::ZAD1)
2961 .
Case(
"za2.d", AArch64::ZAD2)
2962 .
Case(
"za3.d", AArch64::ZAD3)
2963 .
Case(
"za4.d", AArch64::ZAD4)
2964 .
Case(
"za5.d", AArch64::ZAD5)
2965 .
Case(
"za6.d", AArch64::ZAD6)
2966 .
Case(
"za7.d", AArch64::ZAD7)
2967 .
Case(
"za0.s", AArch64::ZAS0)
2968 .
Case(
"za1.s", AArch64::ZAS1)
2969 .
Case(
"za2.s", AArch64::ZAS2)
2970 .
Case(
"za3.s", AArch64::ZAS3)
2971 .
Case(
"za0.h", AArch64::ZAH0)
2972 .
Case(
"za1.h", AArch64::ZAH1)
2973 .
Case(
"za0.b", AArch64::ZAB0)
2979 .
Case(
"za", AArch64::ZA)
2980 .
Case(
"za0.q", AArch64::ZAQ0)
2981 .
Case(
"za1.q", AArch64::ZAQ1)
2982 .
Case(
"za2.q", AArch64::ZAQ2)
2983 .
Case(
"za3.q", AArch64::ZAQ3)
2984 .
Case(
"za4.q", AArch64::ZAQ4)
2985 .
Case(
"za5.q", AArch64::ZAQ5)
2986 .
Case(
"za6.q", AArch64::ZAQ6)
2987 .
Case(
"za7.q", AArch64::ZAQ7)
2988 .
Case(
"za8.q", AArch64::ZAQ8)
2989 .
Case(
"za9.q", AArch64::ZAQ9)
2990 .
Case(
"za10.q", AArch64::ZAQ10)
2991 .
Case(
"za11.q", AArch64::ZAQ11)
2992 .
Case(
"za12.q", AArch64::ZAQ12)
2993 .
Case(
"za13.q", AArch64::ZAQ13)
2994 .
Case(
"za14.q", AArch64::ZAQ14)
2995 .
Case(
"za15.q", AArch64::ZAQ15)
2996 .
Case(
"za0.d", AArch64::ZAD0)
2997 .
Case(
"za1.d", AArch64::ZAD1)
2998 .
Case(
"za2.d", AArch64::ZAD2)
2999 .
Case(
"za3.d", AArch64::ZAD3)
3000 .
Case(
"za4.d", AArch64::ZAD4)
3001 .
Case(
"za5.d", AArch64::ZAD5)
3002 .
Case(
"za6.d", AArch64::ZAD6)
3003 .
Case(
"za7.d", AArch64::ZAD7)
3004 .
Case(
"za0.s", AArch64::ZAS0)
3005 .
Case(
"za1.s", AArch64::ZAS1)
3006 .
Case(
"za2.s", AArch64::ZAS2)
3007 .
Case(
"za3.s", AArch64::ZAS3)
3008 .
Case(
"za0.h", AArch64::ZAH0)
3009 .
Case(
"za1.h", AArch64::ZAH1)
3010 .
Case(
"za0.b", AArch64::ZAB0)
3011 .
Case(
"za0h.q", AArch64::ZAQ0)
3012 .
Case(
"za1h.q", AArch64::ZAQ1)
3013 .
Case(
"za2h.q", AArch64::ZAQ2)
3014 .
Case(
"za3h.q", AArch64::ZAQ3)
3015 .
Case(
"za4h.q", AArch64::ZAQ4)
3016 .
Case(
"za5h.q", AArch64::ZAQ5)
3017 .
Case(
"za6h.q", AArch64::ZAQ6)
3018 .
Case(
"za7h.q", AArch64::ZAQ7)
3019 .
Case(
"za8h.q", AArch64::ZAQ8)
3020 .
Case(
"za9h.q", AArch64::ZAQ9)
3021 .
Case(
"za10h.q", AArch64::ZAQ10)
3022 .
Case(
"za11h.q", AArch64::ZAQ11)
3023 .
Case(
"za12h.q", AArch64::ZAQ12)
3024 .
Case(
"za13h.q", AArch64::ZAQ13)
3025 .
Case(
"za14h.q", AArch64::ZAQ14)
3026 .
Case(
"za15h.q", AArch64::ZAQ15)
3027 .
Case(
"za0h.d", AArch64::ZAD0)
3028 .
Case(
"za1h.d", AArch64::ZAD1)
3029 .
Case(
"za2h.d", AArch64::ZAD2)
3030 .
Case(
"za3h.d", AArch64::ZAD3)
3031 .
Case(
"za4h.d", AArch64::ZAD4)
3032 .
Case(
"za5h.d", AArch64::ZAD5)
3033 .
Case(
"za6h.d", AArch64::ZAD6)
3034 .
Case(
"za7h.d", AArch64::ZAD7)
3035 .
Case(
"za0h.s", AArch64::ZAS0)
3036 .
Case(
"za1h.s", AArch64::ZAS1)
3037 .
Case(
"za2h.s", AArch64::ZAS2)
3038 .
Case(
"za3h.s", AArch64::ZAS3)
3039 .
Case(
"za0h.h", AArch64::ZAH0)
3040 .
Case(
"za1h.h", AArch64::ZAH1)
3041 .
Case(
"za0h.b", AArch64::ZAB0)
3042 .
Case(
"za0v.q", AArch64::ZAQ0)
3043 .
Case(
"za1v.q", AArch64::ZAQ1)
3044 .
Case(
"za2v.q", AArch64::ZAQ2)
3045 .
Case(
"za3v.q", AArch64::ZAQ3)
3046 .
Case(
"za4v.q", AArch64::ZAQ4)
3047 .
Case(
"za5v.q", AArch64::ZAQ5)
3048 .
Case(
"za6v.q", AArch64::ZAQ6)
3049 .
Case(
"za7v.q", AArch64::ZAQ7)
3050 .
Case(
"za8v.q", AArch64::ZAQ8)
3051 .
Case(
"za9v.q", AArch64::ZAQ9)
3052 .
Case(
"za10v.q", AArch64::ZAQ10)
3053 .
Case(
"za11v.q", AArch64::ZAQ11)
3054 .
Case(
"za12v.q", AArch64::ZAQ12)
3055 .
Case(
"za13v.q", AArch64::ZAQ13)
3056 .
Case(
"za14v.q", AArch64::ZAQ14)
3057 .
Case(
"za15v.q", AArch64::ZAQ15)
3058 .
Case(
"za0v.d", AArch64::ZAD0)
3059 .
Case(
"za1v.d", AArch64::ZAD1)
3060 .
Case(
"za2v.d", AArch64::ZAD2)
3061 .
Case(
"za3v.d", AArch64::ZAD3)
3062 .
Case(
"za4v.d", AArch64::ZAD4)
3063 .
Case(
"za5v.d", AArch64::ZAD5)
3064 .
Case(
"za6v.d", AArch64::ZAD6)
3065 .
Case(
"za7v.d", AArch64::ZAD7)
3066 .
Case(
"za0v.s", AArch64::ZAS0)
3067 .
Case(
"za1v.s", AArch64::ZAS1)
3068 .
Case(
"za2v.s", AArch64::ZAS2)
3069 .
Case(
"za3v.s", AArch64::ZAS3)
3070 .
Case(
"za0v.h", AArch64::ZAH0)
3071 .
Case(
"za1v.h", AArch64::ZAH1)
3072 .
Case(
"za0v.b", AArch64::ZAB0)
3076bool AArch64AsmParser::parseRegister(MCRegister &
Reg, SMLoc &StartLoc,
3078 return !tryParseRegister(
Reg, StartLoc, EndLoc).isSuccess();
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &
Reg, SMLoc &StartLoc,
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(
Reg);
3090MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3092 MCRegister
Reg = MCRegister();
3094 return Kind == RegKind::SVEDataVector ?
Reg : MCRegister();
3097 return Kind == RegKind::SVEPredicateVector ?
Reg : MCRegister();
3100 return Kind == RegKind::SVEPredicateAsCounter ?
Reg : MCRegister();
3103 return Kind == RegKind::NeonVector ?
Reg : MCRegister();
3106 return Kind == RegKind::Matrix ?
Reg : MCRegister();
3108 if (
Name.equals_insensitive(
"zt0"))
3109 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3113 return (Kind == RegKind::Scalar) ?
Reg : MCRegister();
3117 if (MCRegister
Reg = StringSwitch<unsigned>(
Name.lower())
3118 .Case(
"fp", AArch64::FP)
3119 .Case(
"lr", AArch64::LR)
3120 .Case(
"x31", AArch64::XZR)
3121 .Case(
"w31", AArch64::WZR)
3123 return Kind == RegKind::Scalar ?
Reg : MCRegister();
3129 if (Entry == RegisterReqs.
end())
3130 return MCRegister();
3133 if (Kind ==
Entry->getValue().first)
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3149 case RegKind::LookupTable:
3158ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3159 const AsmToken &Tok = getTok();
3164 MCRegister
Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3174ParseStatus AArch64AsmParser::tryParseSysCROperand(
OperandVector &Operands) {
3178 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3181 if (Tok[0] !=
'c' && Tok[0] !=
'C')
3182 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3186 if (BadNum || CRNum > 15)
3187 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3191 AArch64Operand::CreateSysCR(CRNum, S, getLoc(),
getContext()));
3196ParseStatus AArch64AsmParser::tryParseRPRFMOperand(
OperandVector &Operands) {
3198 const AsmToken &Tok = getTok();
3200 unsigned MaxVal = 63;
3205 const MCExpr *ImmVal;
3206 if (getParser().parseExpression(ImmVal))
3211 return TokError(
"immediate value expected for prefetch operand");
3214 return TokError(
"prefetch operand out of range, [0," +
utostr(MaxVal) +
3217 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->
getValue());
3218 Operands.
push_back(AArch64Operand::CreatePrefetch(
3219 prfop, RPRFM ? RPRFM->Name :
"", S,
getContext()));
3224 return TokError(
"prefetch hint expected");
3226 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.
getString());
3228 return TokError(
"prefetch hint expected");
3230 Operands.
push_back(AArch64Operand::CreatePrefetch(
3237template <
bool IsSVEPrefetch>
3238ParseStatus AArch64AsmParser::tryParsePrefetch(
OperandVector &Operands) {
3240 const AsmToken &Tok = getTok();
3242 auto LookupByName = [](StringRef
N) {
3243 if (IsSVEPrefetch) {
3244 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
3245 return std::optional<unsigned>(Res->Encoding);
3246 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
3247 return std::optional<unsigned>(Res->Encoding);
3248 return std::optional<unsigned>();
3251 auto LookupByEncoding = [](
unsigned E) {
3252 if (IsSVEPrefetch) {
3253 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
3254 return std::optional<StringRef>(Res->Name);
3255 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
3256 return std::optional<StringRef>(Res->Name);
3257 return std::optional<StringRef>();
3259 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3265 const MCExpr *ImmVal;
3266 if (getParser().parseExpression(ImmVal))
3271 return TokError(
"immediate value expected for prefetch operand");
3274 return TokError(
"prefetch operand out of range, [0," +
utostr(MaxVal) +
3277 auto PRFM = LookupByEncoding(MCE->
getValue());
3278 Operands.
push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(
""),
3284 return TokError(
"prefetch hint expected");
3286 auto PRFM = LookupByName(Tok.
getString());
3288 return TokError(
"prefetch hint expected");
3290 Operands.
push_back(AArch64Operand::CreatePrefetch(
3297ParseStatus AArch64AsmParser::tryParsePSBHint(
OperandVector &Operands) {
3299 const AsmToken &Tok = getTok();
3301 return TokError(
"invalid operand for instruction");
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
3305 return TokError(
"invalid operand for instruction");
3307 Operands.
push_back(AArch64Operand::CreatePSBHint(
3313ParseStatus AArch64AsmParser::tryParseSyspXzrPair(
OperandVector &Operands) {
3314 SMLoc StartLoc = getLoc();
3320 auto RegTok = getTok();
3321 if (!tryParseScalarRegister(RegNum).isSuccess())
3324 if (RegNum != AArch64::XZR) {
3325 getLexer().UnLex(RegTok);
3332 if (!tryParseScalarRegister(RegNum).isSuccess())
3333 return TokError(
"expected register operand");
3335 if (RegNum != AArch64::XZR)
3336 return TokError(
"xzr must be followed by xzr");
3340 Operands.
push_back(AArch64Operand::CreateReg(
3341 RegNum, RegKind::Scalar, StartLoc, getLoc(),
getContext()));
3347ParseStatus AArch64AsmParser::tryParseBTIHint(
OperandVector &Operands) {
3349 const AsmToken &Tok = getTok();
3351 return TokError(
"invalid operand for instruction");
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
3355 return TokError(
"invalid operand for instruction");
3357 Operands.
push_back(AArch64Operand::CreateBTIHint(
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(
OperandVector &Operands) {
3366 const AsmToken &Tok = getTok();
3368 return TokError(
"invalid operand for instruction");
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.
getString());
3373 return TokError(
"invalid operand for instruction");
3375 Operands.
push_back(AArch64Operand::CreateCMHPriorityHint(
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(
OperandVector &Operands) {
3384 const AsmToken &Tok = getTok();
3386 return TokError(
"invalid operand for instruction");
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.
getString());
3390 return TokError(
"invalid operand for instruction");
3392 Operands.
push_back(AArch64Operand::CreateTIndexHint(
3400ParseStatus AArch64AsmParser::tryParseAdrpLabel(
OperandVector &Operands) {
3402 const MCExpr *Expr =
nullptr;
3408 if (parseSymbolicImmVal(Expr))
3414 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3423 return Error(S,
"gotpage label reference not allowed an addend");
3435 return Error(S,
"page or gotpage label reference expected");
3450ParseStatus AArch64AsmParser::tryParseAdrLabel(
OperandVector &Operands) {
3452 const MCExpr *Expr =
nullptr;
3461 if (parseSymbolicImmVal(Expr))
3467 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3479 return Error(S,
"unexpected adr label");
3489template <
bool AddFPZeroAsLiteral>
3490ParseStatus AArch64AsmParser::tryParseFPImm(
OperandVector &Operands) {
3498 const AsmToken &Tok = getTok();
3502 return TokError(
"invalid floating point immediate");
3507 if (Tok.
getIntVal() > 255 || isNegative)
3508 return TokError(
"encoded floating point value out of range");
3512 AArch64Operand::CreateFPImm(
F,
true, S,
getContext()));
3515 APFloat RealVal(APFloat::IEEEdouble());
3517 RealVal.convertFromString(Tok.
getString(), APFloat::rmTowardZero);
3519 return TokError(
"invalid floating point representation");
3522 RealVal.changeSign();
3524 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3528 Operands.
push_back(AArch64Operand::CreateFPImm(
3529 RealVal, *StatusOrErr == APFloat::opOK, S,
getContext()));
3540AArch64AsmParser::tryParseImmWithOptionalShift(
OperandVector &Operands) {
3551 return tryParseImmRange(Operands);
3553 const MCExpr *
Imm =
nullptr;
3554 if (parseSymbolicImmVal(Imm))
3558 AArch64Operand::CreateImm(Imm, S, getLoc(),
getContext()));
3565 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3567 AArch64Operand::CreateImm(Imm, S, getLoc(),
getContext()));
3569 AArch64Operand::CreateToken(VecGroup, getLoc(),
getContext()));
3575 !getTok().getIdentifier().equals_insensitive(
"lsl"))
3576 return Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3584 return Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3586 int64_t ShiftAmount = getTok().getIntVal();
3588 if (ShiftAmount < 0)
3589 return Error(getLoc(),
"positive shift amount required");
3593 if (ShiftAmount == 0 && Imm !=
nullptr) {
3595 AArch64Operand::CreateImm(Imm, S, getLoc(),
getContext()));
3599 Operands.
push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3607AArch64AsmParser::parseCondCodeString(StringRef
Cond, std::string &Suggestion) {
3641 Suggestion =
"nfrst";
3647bool AArch64AsmParser::parseCondCode(
OperandVector &Operands,
3648 bool invertCondCode) {
3650 const AsmToken &Tok = getTok();
3654 std::string Suggestion;
3657 std::string Msg =
"invalid condition code";
3658 if (!Suggestion.empty())
3659 Msg +=
", did you mean " + Suggestion +
"?";
3660 return TokError(Msg);
3664 if (invertCondCode) {
3666 return TokError(
"condition codes AL and NV are invalid for this instruction");
3671 AArch64Operand::CreateCondCode(CC, S, getLoc(),
getContext()));
3675ParseStatus AArch64AsmParser::tryParseSVCR(
OperandVector &Operands) {
3676 const AsmToken &Tok = getTok();
3680 return TokError(
"invalid operand for instruction");
3682 unsigned PStateImm = -1;
3683 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.
getString());
3686 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3687 PStateImm = SVCR->Encoding;
3695ParseStatus AArch64AsmParser::tryParseMatrixRegister(
OperandVector &Operands) {
3696 const AsmToken &Tok = getTok();
3701 if (
Name.equals_insensitive(
"za") ||
Name.starts_with_insensitive(
"za.")) {
3703 unsigned ElementWidth = 0;
3704 auto DotPosition =
Name.find(
'.');
3706 const auto &KindRes =
3710 "Expected the register to be followed by element width suffix");
3711 ElementWidth = KindRes->second;
3713 Operands.
push_back(AArch64Operand::CreateMatrixRegister(
3714 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3719 if (parseOperand(Operands,
false,
false))
3726 MCRegister
Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3730 size_t DotPosition =
Name.find(
'.');
3733 StringRef Head =
Name.take_front(DotPosition);
3734 StringRef
Tail =
Name.drop_front(DotPosition);
3735 StringRef RowOrColumn = Head.
take_back();
3737 MatrixKind
Kind = StringSwitch<MatrixKind>(RowOrColumn.
lower())
3738 .Case(
"h", MatrixKind::Row)
3739 .Case(
"v", MatrixKind::Col)
3740 .Default(MatrixKind::Tile);
3746 "Expected the register to be followed by element width suffix");
3747 unsigned ElementWidth = KindRes->second;
3751 Operands.
push_back(AArch64Operand::CreateMatrixRegister(
3757 if (parseOperand(Operands,
false,
false))
3766AArch64AsmParser::tryParseOptionalShiftExtend(
OperandVector &Operands) {
3767 const AsmToken &Tok = getTok();
3770 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3799 return TokError(
"expected #imm after shift specifier");
3805 AArch64Operand::CreateShiftExtend(ShOp, 0,
false, S,
E,
getContext()));
3814 return Error(
E,
"expected integer shift amount");
3816 const MCExpr *ImmVal;
3817 if (getParser().parseExpression(ImmVal))
3822 return Error(
E,
"expected constant '#imm' after shift specifier");
3825 Operands.
push_back(AArch64Operand::CreateShiftExtend(
3834 {
"crc", {AArch64::FeatureCRC}},
3835 {
"sm4", {AArch64::FeatureSM4}},
3836 {
"sha3", {AArch64::FeatureSHA3}},
3837 {
"sha2", {AArch64::FeatureSHA2}},
3838 {
"aes", {AArch64::FeatureAES}},
3839 {
"crypto", {AArch64::FeatureCrypto}},
3840 {
"fp", {AArch64::FeatureFPARMv8}},
3841 {
"simd", {AArch64::FeatureNEON}},
3842 {
"ras", {AArch64::FeatureRAS}},
3843 {
"rasv2", {AArch64::FeatureRASv2}},
3844 {
"lse", {AArch64::FeatureLSE}},
3845 {
"predres", {AArch64::FeaturePredRes}},
3846 {
"predres2", {AArch64::FeatureSPECRES2}},
3847 {
"ccdp", {AArch64::FeatureCacheDeepPersist}},
3848 {
"mte", {AArch64::FeatureMTE}},
3849 {
"memtag", {AArch64::FeatureMTE}},
3850 {
"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3851 {
"pan", {AArch64::FeaturePAN}},
3852 {
"pan-rwv", {AArch64::FeaturePAN_RWV}},
3853 {
"ccpp", {AArch64::FeatureCCPP}},
3854 {
"rcpc", {AArch64::FeatureRCPC}},
3855 {
"rng", {AArch64::FeatureRandGen}},
3856 {
"sve", {AArch64::FeatureSVE}},
3857 {
"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3858 {
"sve2", {AArch64::FeatureSVE2}},
3859 {
"sve-aes", {AArch64::FeatureSVEAES}},
3860 {
"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3861 {
"sve-sm4", {AArch64::FeatureSVESM4}},
3862 {
"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3863 {
"sve-sha3", {AArch64::FeatureSVESHA3}},
3864 {
"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3865 {
"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3867 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3868 AArch64::FeatureSVE2}},
3869 {
"sve2p1", {AArch64::FeatureSVE2p1}},
3870 {
"ls64", {AArch64::FeatureLS64}},
3871 {
"xs", {AArch64::FeatureXS}},
3872 {
"pauth", {AArch64::FeaturePAuth}},
3873 {
"flagm", {AArch64::FeatureFlagM}},
3874 {
"rme", {AArch64::FeatureRME}},
3875 {
"sme", {AArch64::FeatureSME}},
3876 {
"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3877 {
"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3878 {
"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3879 {
"sme2", {AArch64::FeatureSME2}},
3880 {
"sme2p1", {AArch64::FeatureSME2p1}},
3881 {
"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3882 {
"hbc", {AArch64::FeatureHBC}},
3883 {
"mops", {AArch64::FeatureMOPS}},
3884 {
"mec", {AArch64::FeatureMEC}},
3885 {
"the", {AArch64::FeatureTHE}},
3886 {
"d128", {AArch64::FeatureD128}},
3887 {
"lse128", {AArch64::FeatureLSE128}},
3888 {
"ite", {AArch64::FeatureITE}},
3889 {
"cssc", {AArch64::FeatureCSSC}},
3890 {
"rcpc3", {AArch64::FeatureRCPC3}},
3891 {
"gcs", {AArch64::FeatureGCS}},
3892 {
"bf16", {AArch64::FeatureBF16}},
3893 {
"compnum", {AArch64::FeatureComplxNum}},
3894 {
"dotprod", {AArch64::FeatureDotProd}},
3895 {
"f32mm", {AArch64::FeatureMatMulFP32}},
3896 {
"f64mm", {AArch64::FeatureMatMulFP64}},
3897 {
"fp16", {AArch64::FeatureFullFP16}},
3898 {
"fp16fml", {AArch64::FeatureFP16FML}},
3899 {
"i8mm", {AArch64::FeatureMatMulInt8}},
3900 {
"lor", {AArch64::FeatureLOR}},
3901 {
"profile", {AArch64::FeatureSPE}},
3905 {
"rdm", {AArch64::FeatureRDM}},
3906 {
"rdma", {AArch64::FeatureRDM}},
3907 {
"sb", {AArch64::FeatureSB}},
3908 {
"ssbs", {AArch64::FeatureSSBS}},
3909 {
"fp8", {AArch64::FeatureFP8}},
3910 {
"faminmax", {AArch64::FeatureFAMINMAX}},
3911 {
"fp8fma", {AArch64::FeatureFP8FMA}},
3912 {
"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3913 {
"fp8dot2", {AArch64::FeatureFP8DOT2}},
3914 {
"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3915 {
"fp8dot4", {AArch64::FeatureFP8DOT4}},
3916 {
"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3917 {
"lut", {AArch64::FeatureLUT}},
3918 {
"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3919 {
"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3920 {
"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3921 {
"sme-fa64", {AArch64::FeatureSMEFA64}},
3922 {
"cpa", {AArch64::FeatureCPA}},
3923 {
"tlbiw", {AArch64::FeatureTLBIW}},
3924 {
"pops", {AArch64::FeaturePoPS}},
3925 {
"cmpbr", {AArch64::FeatureCMPBR}},
3926 {
"f8f32mm", {AArch64::FeatureF8F32MM}},
3927 {
"f8f16mm", {AArch64::FeatureF8F16MM}},
3928 {
"fprcvt", {AArch64::FeatureFPRCVT}},
3929 {
"lsfe", {AArch64::FeatureLSFE}},
3930 {
"sme2p2", {AArch64::FeatureSME2p2}},
3931 {
"ssve-aes", {AArch64::FeatureSSVE_AES}},
3932 {
"sve2p2", {AArch64::FeatureSVE2p2}},
3933 {
"sve-aes2", {AArch64::FeatureSVEAES2}},
3934 {
"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3935 {
"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3936 {
"lsui", {AArch64::FeatureLSUI}},
3937 {
"occmo", {AArch64::FeatureOCCMO}},
3938 {
"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3939 {
"sme-mop4", {AArch64::FeatureSME_MOP4}},
3940 {
"sme-tmop", {AArch64::FeatureSME_TMOP}},
3941 {
"lscp", {AArch64::FeatureLSCP}},
3942 {
"tlbid", {AArch64::FeatureTLBID}},
3943 {
"mtetc", {AArch64::FeatureMTETC}},
3944 {
"gcie", {AArch64::FeatureGCIE}},
3945 {
"sme2p3", {AArch64::FeatureSME2p3}},
3946 {
"sve2p3", {AArch64::FeatureSVE2p3}},
3947 {
"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3948 {
"f16mm", {AArch64::FeatureF16MM}},
3949 {
"f16f32dot", {AArch64::FeatureF16F32DOT}},
3950 {
"f16f32mm", {AArch64::FeatureF16F32MM}},
3951 {
"mops-go", {AArch64::FeatureMOPS_GO}},
3952 {
"poe2", {AArch64::FeatureS1POE2}},
3953 {
"tev", {AArch64::FeatureTEV}},
3954 {
"btie", {AArch64::FeatureBTIE}},
3955 {
"dit", {AArch64::FeatureDIT}},
3956 {
"brbe", {AArch64::FeatureBRBE}},
3957 {
"bti", {AArch64::FeatureBranchTargetId}},
3958 {
"fcma", {AArch64::FeatureComplxNum}},
3959 {
"jscvt", {AArch64::FeatureJS}},
3960 {
"pauth-lr", {AArch64::FeaturePAuthLR}},
3961 {
"ssve-fexpa", {AArch64::FeatureSSVE_FEXPA}},
3962 {
"wfxt", {AArch64::FeatureWFxT}},
3966 if (FBS[AArch64::HasV8_0aOps])
3968 if (FBS[AArch64::HasV8_1aOps])
3970 else if (FBS[AArch64::HasV8_2aOps])
3972 else if (FBS[AArch64::HasV8_3aOps])
3974 else if (FBS[AArch64::HasV8_4aOps])
3976 else if (FBS[AArch64::HasV8_5aOps])
3978 else if (FBS[AArch64::HasV8_6aOps])
3980 else if (FBS[AArch64::HasV8_7aOps])
3982 else if (FBS[AArch64::HasV8_8aOps])
3984 else if (FBS[AArch64::HasV8_9aOps])
3986 else if (FBS[AArch64::HasV9_0aOps])
3988 else if (FBS[AArch64::HasV9_1aOps])
3990 else if (FBS[AArch64::HasV9_2aOps])
3992 else if (FBS[AArch64::HasV9_3aOps])
3994 else if (FBS[AArch64::HasV9_4aOps])
3996 else if (FBS[AArch64::HasV9_5aOps])
3998 else if (FBS[AArch64::HasV9_6aOps])
4000 else if (FBS[AArch64::HasV9_7aOps])
4002 else if (FBS[AArch64::HasV8_0rOps])
4011 Str += !ExtMatches.
empty() ?
llvm::join(ExtMatches,
", ") :
"(unknown)";
4015void AArch64AsmParser::createSysAlias(uint16_t Encoding,
OperandVector &Operands,
4017 const uint16_t Op2 = Encoding & 7;
4018 const uint16_t Cm = (Encoding & 0x78) >> 3;
4019 const uint16_t Cn = (Encoding & 0x780) >> 7;
4020 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4025 AArch64Operand::CreateImm(Expr, S, getLoc(),
getContext()));
4027 AArch64Operand::CreateSysCR(Cn, S, getLoc(),
getContext()));
4029 AArch64Operand::CreateSysCR(Cm, S, getLoc(),
getContext()));
4032 AArch64Operand::CreateImm(Expr, S, getLoc(),
getContext()));
4038bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4040 if (
Name.contains(
'.'))
4041 return TokError(
"invalid operand");
4046 const AsmToken &Tok = getTok();
4049 bool ExpectRegister =
true;
4050 bool OptionalRegister =
false;
4051 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
4052 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4054 if (Mnemonic ==
"ic") {
4055 const AArch64IC::IC *IC = AArch64IC::lookupICByName(
Op);
4057 return TokError(
"invalid operand for IC instruction");
4058 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
4059 std::string Str(
"IC " + std::string(IC->
Name) +
" requires: ");
4061 return TokError(Str);
4064 createSysAlias(IC->
Encoding, Operands, S);
4065 }
else if (Mnemonic ==
"dc") {
4066 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(
Op);
4068 return TokError(
"invalid operand for DC instruction");
4069 else if (!DC->
haveFeatures(getSTI().getFeatureBits())) {
4070 std::string Str(
"DC " + std::string(DC->
Name) +
" requires: ");
4072 return TokError(Str);
4074 createSysAlias(DC->
Encoding, Operands, S);
4075 }
else if (Mnemonic ==
"at") {
4076 const AArch64AT::AT *AT = AArch64AT::lookupATByName(
Op);
4078 return TokError(
"invalid operand for AT instruction");
4079 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
4080 std::string Str(
"AT " + std::string(AT->
Name) +
" requires: ");
4082 return TokError(Str);
4084 createSysAlias(AT->
Encoding, Operands, S);
4085 }
else if (Mnemonic ==
"tlbi") {
4086 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(
Op);
4088 return TokError(
"invalid operand for TLBI instruction");
4089 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
4090 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires: ");
4092 return TokError(Str);
4094 ExpectRegister = TLBI->
RegUse == REG_REQUIRED;
4095 if (hasAll || hasTLBID)
4096 OptionalRegister = TLBI->
RegUse == REG_OPTIONAL;
4097 createSysAlias(TLBI->
Encoding, Operands, S);
4098 }
else if (Mnemonic ==
"gic") {
4099 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(
Op);
4101 return TokError(
"invalid operand for GIC instruction");
4102 else if (!GIC->
haveFeatures(getSTI().getFeatureBits())) {
4103 std::string Str(
"GIC " + std::string(GIC->
Name) +
" requires: ");
4105 return TokError(Str);
4108 createSysAlias(GIC->
Encoding, Operands, S);
4109 }
else if (Mnemonic ==
"gsb") {
4110 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(
Op);
4112 return TokError(
"invalid operand for GSB instruction");
4113 else if (!GSB->
haveFeatures(getSTI().getFeatureBits())) {
4114 std::string Str(
"GSB " + std::string(GSB->
Name) +
" requires: ");
4116 return TokError(Str);
4118 ExpectRegister =
false;
4119 createSysAlias(GSB->
Encoding, Operands, S);
4120 }
else if (Mnemonic ==
"plbi") {
4121 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(
Op);
4123 return TokError(
"invalid operand for PLBI instruction");
4124 else if (!PLBI->
haveFeatures(getSTI().getFeatureBits())) {
4125 std::string Str(
"PLBI " + std::string(PLBI->
Name) +
" requires: ");
4127 return TokError(Str);
4129 ExpectRegister = PLBI->
RegUse == REG_REQUIRED;
4130 if (hasAll || hasTLBID)
4131 OptionalRegister = PLBI->
RegUse == REG_OPTIONAL;
4132 createSysAlias(PLBI->
Encoding, Operands, S);
4133 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp" ||
4134 Mnemonic ==
"cosp") {
4136 if (
Op.lower() !=
"rctx")
4137 return TokError(
"invalid operand for prediction restriction instruction");
4139 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
4140 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
4142 if (Mnemonic ==
"cosp" && !hasSpecres2)
4143 return TokError(
"COSP requires: predres2");
4145 return TokError(Mnemonic.
upper() +
"RCTX requires: predres");
4147 uint16_t PRCTX_Op2 = Mnemonic ==
"cfp" ? 0b100
4148 : Mnemonic ==
"dvp" ? 0b101
4149 : Mnemonic ==
"cosp" ? 0b110
4150 : Mnemonic ==
"cpp" ? 0b111
4153 "Invalid mnemonic for prediction restriction instruction");
4154 const auto SYS_3_7_3 = 0b01101110011;
4155 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4157 createSysAlias(Encoding, Operands, S);
4162 bool HasRegister =
false;
4167 return TokError(
"expected register operand");
4171 if (!OptionalRegister) {
4172 if (ExpectRegister && !HasRegister)
4173 return TokError(
"specified " + Mnemonic +
" op requires a register");
4174 else if (!ExpectRegister && HasRegister)
4175 return TokError(
"specified " + Mnemonic +
" op does not use a register");
4187bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4192 AArch64Operand::CreateToken(
"sysl", NameLoc,
getContext()));
4195 SMLoc startLoc = getLoc();
4196 const AsmToken ®Tok = getTok();
4198 MCRegister
Reg = matchRegisterNameAlias(reg.
lower(), RegKind::Scalar);
4200 return TokError(
"expected register operand");
4202 Operands.
push_back(AArch64Operand::CreateReg(
4203 Reg, RegKind::Scalar, startLoc, getLoc(),
getContext(), EqualsReg));
4210 const AsmToken &operandTok = getTok();
4212 SMLoc S2 = operandTok.
getLoc();
4215 if (Mnemonic ==
"gicr") {
4216 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(
Op);
4218 return Error(S2,
"invalid operand for GICR instruction");
4219 else if (!GICR->
haveFeatures(getSTI().getFeatureBits())) {
4220 std::string Str(
"GICR " + std::string(GICR->
Name) +
" requires: ");
4222 return Error(S2, Str);
4224 createSysAlias(GICR->
Encoding, Operands, S2);
4235bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4237 if (
Name.contains(
'.'))
4238 return TokError(
"invalid operand");
4242 AArch64Operand::CreateToken(
"sysp", NameLoc,
getContext()));
4244 const AsmToken &Tok = getTok();
4248 if (Mnemonic ==
"tlbip") {
4249 const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(
Op);
4251 return TokError(
"invalid operand for TLBIP instruction");
4254 std::string Str(
"instruction requires: ");
4256 return TokError(Str);
4258 createSysAlias(TLBIP->
Encoding, Operands, S);
4267 return TokError(
"expected register identifier");
4268 auto Result = tryParseSyspXzrPair(Operands);
4270 Result = tryParseGPRSeqPair(Operands);
4272 return TokError(
"specified " + Mnemonic +
4273 " op requires a pair of registers");
4281ParseStatus AArch64AsmParser::tryParseBarrierOperand(
OperandVector &Operands) {
4282 MCAsmParser &Parser = getParser();
4283 const AsmToken &Tok = getTok();
4286 return TokError(
"'csync' operand expected");
4289 const MCExpr *ImmVal;
4290 SMLoc ExprLoc = getLoc();
4291 AsmToken IntTok = Tok;
4292 if (getParser().parseExpression(ImmVal))
4296 return Error(ExprLoc,
"immediate value expected for barrier operand");
4298 if (Mnemonic ==
"dsb" &&
Value > 15) {
4306 return Error(ExprLoc,
"barrier operand out of range");
4307 auto DB = AArch64DB::lookupDBByEncoding(
Value);
4308 Operands.
push_back(AArch64Operand::CreateBarrier(
Value, DB ?
DB->Name :
"",
4315 return TokError(
"invalid operand for instruction");
4318 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4319 auto DB = AArch64DB::lookupDBByName(Operand);
4321 if (Mnemonic ==
"isb" && (!DB ||
DB->Encoding != AArch64DB::sy))
4322 return TokError(
"'sy' or #imm operand expected");
4324 if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4325 return TokError(
"'csync' operand expected");
4327 if (Mnemonic ==
"dsb") {
4332 return TokError(
"invalid barrier option name");
4335 Operands.
push_back(AArch64Operand::CreateBarrier(
4336 DB ?
DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(),
4344AArch64AsmParser::tryParseBarriernXSOperand(
OperandVector &Operands) {
4345 const AsmToken &Tok = getTok();
4347 assert(Mnemonic ==
"dsb" &&
"Instruction does not accept nXS operands");
4348 if (Mnemonic !=
"dsb")
4353 const MCExpr *ImmVal;
4354 SMLoc ExprLoc = getLoc();
4355 if (getParser().parseExpression(ImmVal))
4359 return Error(ExprLoc,
"immediate value expected for barrier operand");
4364 return Error(ExprLoc,
"barrier operand out of range");
4365 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(
Value);
4366 Operands.
push_back(AArch64Operand::CreateBarrier(
DB->Encoding,
DB->Name,
4373 return TokError(
"invalid operand for instruction");
4376 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4379 return TokError(
"invalid barrier option name");
4382 AArch64Operand::CreateBarrier(
DB->Encoding, Tok.
getString(), getLoc(),
4389ParseStatus AArch64AsmParser::tryParseSysReg(
OperandVector &Operands) {
4390 const AsmToken &Tok = getTok();
4395 if (AArch64SVCR::lookupSVCRByName(Tok.
getString()))
4399 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.
getString());
4400 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4401 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4402 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4406 unsigned PStateImm = -1;
4407 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.
getString());
4408 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4409 PStateImm = PState15->Encoding;
4411 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.
getString());
4412 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4413 PStateImm = PState1->Encoding;
4417 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
4425AArch64AsmParser::tryParsePHintInstOperand(
OperandVector &Operands) {
4427 const AsmToken &Tok = getTok();
4429 return TokError(
"invalid operand for instruction");
4433 return TokError(
"invalid operand for instruction");
4435 Operands.
push_back(AArch64Operand::CreatePHintInst(
4442bool AArch64AsmParser::tryParseNeonVectorRegister(
OperandVector &Operands) {
4450 ParseStatus Res = tryParseVectorRegister(
Reg, Kind, RegKind::NeonVector);
4458 unsigned ElementWidth = KindRes->second;
4460 AArch64Operand::CreateVectorReg(
Reg, RegKind::NeonVector, ElementWidth,
4468 return tryParseVectorIndex(Operands).isFailure();
4471ParseStatus AArch64AsmParser::tryParseVectorIndex(
OperandVector &Operands) {
4472 SMLoc SIdx = getLoc();
4474 const MCExpr *ImmVal;
4475 if (getParser().parseExpression(ImmVal))
4479 return TokError(
"immediate value expected for vector index");
4497ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &
Reg,
4499 RegKind MatchKind) {
4500 const AsmToken &Tok = getTok();
4509 StringRef Head =
Name.slice(Start,
Next);
4510 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4516 return TokError(
"invalid vector kind qualifier");
4527ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4529 ParseStatus Status =
4530 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4532 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4537template <RegKind RK>
4539AArch64AsmParser::tryParseSVEPredicateVector(
OperandVector &Operands) {
4541 const SMLoc S = getLoc();
4544 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4552 unsigned ElementWidth = KindRes->second;
4553 Operands.
push_back(AArch64Operand::CreateVectorReg(
4554 RegNum, RK, ElementWidth, S,
4558 if (RK == RegKind::SVEPredicateAsCounter) {
4559 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4565 if (parseOperand(Operands,
false,
false))
4576 return Error(S,
"not expecting size suffix");
4584 auto Pred = getTok().getString().lower();
4585 if (RK == RegKind::SVEPredicateAsCounter && Pred !=
"z")
4586 return Error(getLoc(),
"expecting 'z' predication");
4588 if (RK == RegKind::SVEPredicateVector && Pred !=
"z" && Pred !=
"m")
4589 return Error(getLoc(),
"expecting 'm' or 'z' predication");
4592 const char *ZM = Pred ==
"z" ?
"z" :
"m";
4600bool AArch64AsmParser::parseRegister(
OperandVector &Operands) {
4602 if (!tryParseNeonVectorRegister(Operands))
4605 if (tryParseZTOperand(Operands).isSuccess())
4609 if (tryParseGPROperand<false>(Operands).isSuccess())
4615bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
4616 bool HasELFModifier =
false;
4618 SMLoc Loc = getLexer().getLoc();
4620 HasELFModifier =
true;
4623 return TokError(
"expect relocation specifier in operand after ':'");
4625 std::string LowerCase = getTok().getIdentifier().lower();
4626 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4681 return TokError(
"expect relocation specifier in operand after ':'");
4685 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
4689 if (getParser().parseExpression(ImmVal))
4696 if (
getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4697 if (getParser().parseAtSpecifier(ImmVal, EndLoc))
4707 if (getParser().parsePrimaryExpr(Term, EndLoc))
4715ParseStatus AArch64AsmParser::tryParseMatrixTileList(
OperandVector &Operands) {
4719 auto ParseMatrixTile = [
this](
unsigned &
Reg,
4720 unsigned &ElementWidth) -> ParseStatus {
4721 StringRef
Name = getTok().getString();
4722 size_t DotPosition =
Name.find(
'.');
4730 StringRef
Tail =
Name.drop_front(DotPosition);
4731 const std::optional<std::pair<int, int>> &KindRes =
4735 "Expected the register to be followed by element width suffix");
4736 ElementWidth = KindRes->second;
4743 auto LCurly = getTok();
4748 Operands.
push_back(AArch64Operand::CreateMatrixTileList(
4754 if (getTok().getString().equals_insensitive(
"za")) {
4760 Operands.
push_back(AArch64Operand::CreateMatrixTileList(
4765 SMLoc TileLoc = getLoc();
4767 unsigned FirstReg, ElementWidth;
4768 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4769 if (!ParseRes.isSuccess()) {
4770 getLexer().UnLex(LCurly);
4774 const MCRegisterInfo *RI =
getContext().getRegisterInfo();
4776 unsigned PrevReg = FirstReg;
4778 SmallSet<unsigned, 8> DRegs;
4779 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4781 SmallSet<unsigned, 8> SeenRegs;
4782 SeenRegs.
insert(FirstReg);
4786 unsigned Reg, NextElementWidth;
4787 ParseRes = ParseMatrixTile(
Reg, NextElementWidth);
4788 if (!ParseRes.isSuccess())
4792 if (ElementWidth != NextElementWidth)
4793 return Error(TileLoc,
"mismatched register size suffix");
4796 Warning(TileLoc,
"tile list not in ascending order");
4799 Warning(TileLoc,
"duplicate tile in list");
4802 AArch64Operand::ComputeRegsForAlias(
Reg, DRegs, ElementWidth);
4811 unsigned RegMask = 0;
4812 for (
auto Reg : DRegs)
4816 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(),
getContext()));
4821template <RegKind VectorKind>
4822ParseStatus AArch64AsmParser::tryParseVectorList(
OperandVector &Operands,
4824 MCAsmParser &Parser = getParser();
4829 auto ParseVector = [
this](MCRegister &
Reg, StringRef &
Kind, SMLoc Loc,
4830 bool NoMatchIsError) -> ParseStatus {
4831 auto RegTok = getTok();
4832 auto ParseRes = tryParseVectorRegister(
Reg, Kind, VectorKind);
4833 if (ParseRes.isSuccess()) {
4840 RegTok.getString().equals_insensitive(
"zt0"))
4844 (ParseRes.isNoMatch() && NoMatchIsError &&
4845 !RegTok.getString().starts_with_insensitive(
"za")))
4846 return Error(Loc,
"vector register expected");
4851 unsigned NumRegs = getNumRegsForRegKind(VectorKind);
4853 auto LCurly = getTok();
4857 MCRegister FirstReg;
4858 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4862 if (ParseRes.isNoMatch())
4865 if (!ParseRes.isSuccess())
4868 MCRegister PrevReg = FirstReg;
4871 unsigned Stride = 1;
4873 SMLoc Loc = getLoc();
4877 ParseRes = ParseVector(
Reg, NextKind, getLoc(),
true);
4878 if (!ParseRes.isSuccess())
4882 if (Kind != NextKind)
4883 return Error(Loc,
"mismatched register size suffix");
4886 (PrevReg <
Reg) ? (
Reg - PrevReg) : (NumRegs - (PrevReg -
Reg));
4888 if (Space == 0 || Space > 3)
4889 return Error(Loc,
"invalid number of vectors");
4894 bool HasCalculatedStride =
false;
4896 SMLoc Loc = getLoc();
4899 ParseRes = ParseVector(
Reg, NextKind, getLoc(),
true);
4900 if (!ParseRes.isSuccess())
4904 if (Kind != NextKind)
4905 return Error(Loc,
"mismatched register size suffix");
4907 unsigned RegVal =
getContext().getRegisterInfo()->getEncodingValue(
Reg);
4908 unsigned PrevRegVal =
4909 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4910 if (!HasCalculatedStride) {
4911 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4912 : (NumRegs - (PrevRegVal - RegVal));
4913 HasCalculatedStride =
true;
4917 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4918 return Error(Loc,
"registers must have the same sequential stride");
4929 return Error(S,
"invalid number of vectors");
4931 unsigned NumElements = 0;
4932 unsigned ElementWidth = 0;
4933 if (!
Kind.empty()) {
4935 std::tie(NumElements, ElementWidth) = *VK;
4938 Operands.
push_back(AArch64Operand::CreateVectorList(
4939 FirstReg,
Count, Stride, NumElements, ElementWidth, VectorKind, S,
4943 ParseStatus Res = tryParseVectorIndex(Operands);
4953bool AArch64AsmParser::parseNeonVectorList(
OperandVector &Operands) {
4954 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands,
true);
4955 if (!ParseRes.isSuccess())
4958 return tryParseVectorIndex(Operands).isFailure();
4961ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(
OperandVector &Operands) {
4962 SMLoc StartLoc = getLoc();
4965 ParseStatus Res = tryParseScalarRegister(RegNum);
4970 Operands.
push_back(AArch64Operand::CreateReg(
4971 RegNum, RegKind::Scalar, StartLoc, getLoc(),
getContext()));
4978 return Error(getLoc(),
"index must be absent or #0");
4980 const MCExpr *ImmVal;
4983 return Error(getLoc(),
"index must be absent or #0");
4985 Operands.
push_back(AArch64Operand::CreateReg(
4986 RegNum, RegKind::Scalar, StartLoc, getLoc(),
getContext()));
4990ParseStatus AArch64AsmParser::tryParseZTOperand(
OperandVector &Operands) {
4991 SMLoc StartLoc = getLoc();
4992 const AsmToken &Tok = getTok();
4995 MCRegister
Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);
5000 Operands.
push_back(AArch64Operand::CreateReg(
5001 Reg, RegKind::LookupTable, StartLoc, getLoc(),
getContext()));
5007 AArch64Operand::CreateToken(
"[", getLoc(),
getContext()));
5008 const MCExpr *ImmVal;
5009 if (getParser().parseExpression(ImmVal))
5013 return TokError(
"immediate value expected for vector index");
5014 Operands.
push_back(AArch64Operand::CreateImm(
5018 if (parseOptionalMulOperand(Operands))
5023 AArch64Operand::CreateToken(
"]", getLoc(),
getContext()));
5028template <
bool ParseShiftExtend, RegConstra
intEqualityTy EqTy>
5029ParseStatus AArch64AsmParser::tryParseGPROperand(
OperandVector &Operands) {
5030 SMLoc StartLoc = getLoc();
5033 ParseStatus Res = tryParseScalarRegister(RegNum);
5039 Operands.
push_back(AArch64Operand::CreateReg(
5040 RegNum, RegKind::Scalar, StartLoc, getLoc(),
getContext(), EqTy));
5049 Res = tryParseOptionalShiftExtend(ExtOpnd);
5053 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.
back().
get());
5054 Operands.
push_back(AArch64Operand::CreateReg(
5055 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(),
getContext(), EqTy,
5056 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5057 Ext->hasShiftExtendAmount()));
5062bool AArch64AsmParser::parseOptionalMulOperand(
OperandVector &Operands) {
5063 MCAsmParser &Parser = getParser();
5071 if (!getTok().getString().equals_insensitive(
"mul") ||
5072 !(NextIsVL || NextIsHash))
5076 AArch64Operand::CreateToken(
"mul", getLoc(),
getContext()));
5081 AArch64Operand::CreateToken(
"vl", getLoc(),
getContext()));
5091 const MCExpr *ImmVal;
5094 Operands.
push_back(AArch64Operand::CreateImm(
5101 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
5104bool AArch64AsmParser::parseOptionalVGOperand(
OperandVector &Operands,
5105 StringRef &VecGroup) {
5106 MCAsmParser &Parser = getParser();
5107 auto Tok = Parser.
getTok();
5112 .Case(
"vgx2",
"vgx2")
5113 .Case(
"vgx4",
"vgx4")
5124bool AArch64AsmParser::parseKeywordOperand(
OperandVector &Operands) {
5125 auto Tok = getTok();
// NOTE(review): fragmentary extraction -- many interior lines (switch case
// labels, early returns) are missing; comments describe only visible code.
//
// Parses one instruction operand into Operands. isCondCode marks operand
// positions that must be condition codes; invertCondCode requests the
// inverted encoding (both forwarded from parseInstruction below).
5143bool AArch64AsmParser::parseOperand(
 OperandVector &Operands,
 bool isCondCode,
5144 bool invertCondCode) {
5145 MCAsmParser &Parser = getParser();
 // First give the tablegen'd custom operand parsers a chance.
5148 MatchOperandParserImpl(Operands, Mnemonic,
true);
 // Helper: try an optional shift/extend; on failure un-lex back to SavedTok
 // so the token stream is unchanged.
5162 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
5164 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
5167 getLexer().UnLex(SavedTok);
 // Dispatch on the leading token kind (case labels lost in extraction).
5171 switch (getLexer().getKind()) {
5175 if (parseSymbolicImmVal(Expr))
5176 return Error(S,
"invalid operand");
5180 return parseOptionalShiftExtend(getTok());
 // '[' -- memory/index style operand: emit the bracket token and recurse
 // for the inner operand (no cond-code, no inversion).
5184 AArch64Operand::CreateToken(
"[", getLoc(),
 getContext()));
5189 return parseOperand(Operands,
false,
false);
 // '{' -- vector list, or a literal brace token followed by recursion.
5192 if (!parseNeonVectorList(Operands))
5196 AArch64Operand::CreateToken(
"{", getLoc(),
 getContext()));
5201 return parseOperand(Operands,
false,
false);
 // SME vector-group keyword ("vgx2"/"vgx4") as a token operand.
5206 if (!parseOptionalVGOperand(Operands, VecGroup)) {
5208 AArch64Operand::CreateToken(VecGroup, getLoc(),
 getContext()));
 // Identifier: try a register first, then custom parsers, then an optional
 // shift/extend; un-lex on failure so nothing is consumed.
5216 if (!parseRegister(Operands)) {
5218 AsmToken SavedTok = getTok();
5223 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
5227 Res = tryParseOptionalShiftExtend(Operands);
5230 getLexer().UnLex(SavedTok);
 // "mul vl" / "mul #imm" suffix.
5237 if (!parseOptionalMulOperand(Operands))
 // These mnemonics take bare keyword operands.
5242 if (Mnemonic ==
"brb" || Mnemonic ==
"smstart" || Mnemonic ==
"smstop" ||
5244 return parseKeywordOperand(Operands);
 // General expression, optionally with an @specifier and a trailing
 // binary term (Opcode/Term), e.g. relocation-specifier expressions.
5248 const MCExpr *IdVal, *
Term;
5250 if (getParser().parseExpression(IdVal))
5252 if (getParser().parseAtSpecifier(IdVal,
E))
5254 std::optional<MCBinaryExpr::Opcode> Opcode;
5260 if (getParser().parsePrimaryExpr(Term,
E))
5267 return parseOptionalShiftExtend(getTok());
 // '#' immediate (possibly negative; possibly a floating-point literal).
5278 bool isNegative =
false;
5290 const AsmToken &Tok = getTok();
 // FP literals are only accepted for the fcmp/fcmeq family, and then only
 // the exact constant #0.0 (bit pattern zero, not negated).
5293 uint64_t
IntVal = RealVal.bitcastToAPInt().getZExtValue();
5294 if (Mnemonic !=
"fcmp" && Mnemonic !=
"fcmpe" && Mnemonic !=
"fcmeq" &&
5295 Mnemonic !=
"fcmge" && Mnemonic !=
"fcmgt" && Mnemonic !=
"fcmle" &&
5296 Mnemonic !=
"fcmlt" && Mnemonic !=
"fcmne")
5297 return TokError(
"unexpected floating point literal");
5298 else if (IntVal != 0 || isNegative)
5299 return TokError(
"expected floating-point constant #0.0");
 // Plain symbolic immediate, with an optional shift/extend after it.
5307 const MCExpr *ImmVal;
5308 if (parseSymbolicImmVal(ImmVal))
5315 return parseOptionalShiftExtend(Tok);
 // '=' pseudo operand: only valid for "ldr Xn/Wn, =<expr>".
5318 SMLoc Loc = getLoc();
5319 if (Mnemonic !=
"ldr")
5320 return TokError(
"unexpected token in operand");
5322 const MCExpr *SubExprVal;
5323 if (getParser().parseExpression(SubExprVal))
 // The first operand must already be a scalar register (the destination).
5326 if (Operands.
size() < 2 ||
5327 !
static_cast<AArch64Operand &
>(*Operands[1]).isScalarReg())
5328 return Error(Loc,
"Only valid when first operand is register");
 // X registers allow shifts up to 48, W registers up to 16 (one 16-bit
 // movz chunk per register half-word).
5331 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5339 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
 // If the constant fits a single shifted 16-bit chunk, rewrite the whole
 // instruction as "movz dst, #imm, lsl #shift" instead of a literal load.
5344 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5345 Operands[0] = AArch64Operand::CreateToken(
"movz", Loc, Ctx);
5346 Operands.
push_back(AArch64Operand::CreateImm(
5350 ShiftAmt,
true, S,
E, Ctx));
5353 APInt Simm = APInt(64, Imm << ShiftAmt);
5356 return Error(Loc,
"Immediate too large for register");
 // Otherwise place the value in the constant pool (8 bytes for X regs,
 // 4 for W) and use the pool label as the ldr's immediate.
5359 const MCExpr *CPLoc =
5360 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5361 Operands.
push_back(AArch64Operand::CreateImm(CPLoc, S,
E, Ctx));
// Parses an expression that must fold to a constant and stores its value in
// Out. Returns true on error (via check(), which attaches the given message).
// NOTE(review): the line computing `Value` (presumably an
// evaluateAsConstant/dyn_cast to MCConstantExpr) fell into an extraction gap.
5367bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5368 const MCExpr *Expr =
nullptr;
5370 if (check(getParser().parseExpression(Expr), L,
"expected expression"))
 // Reject expressions that did not fold to a constant.
5373 if (check(!
Value, L,
"expected constant expression"))
5375 Out =
Value->getValue();
// NOTE(review): body lost in extraction; only the signature is visible.
// Consumes an expected ',' token (returns true on error, by the file's
// parse-helper convention seen in parseImmExpr above).
5379bool AArch64AsmParser::parseComma() {
// NOTE(review): fragmentary extraction -- the First/Last parameters, the body
// of several if-blocks, and the final range computation are missing.
//
// Parses a register and checks it lies in [Base, Last]; special-cases the X
// register file where FP (x29) and LR (x30) sit after X28 in the enum, so the
// textual range end is adjusted to X28 and FP/LR are handled explicitly.
5387bool AArch64AsmParser::parseRegisterInRange(
 unsigned &Out,
 unsigned Base,
5391 if (check(parseRegister(
Reg, Start, End), getLoc(),
"expected register"))
5396 unsigned RangeEnd =
Last;
 // GPR64 range: FP/LR are not contiguous with X0..X28 in the register enum.
5397 if (
Base == AArch64::X0) {
5398 if (
Last == AArch64::FP) {
5399 RangeEnd = AArch64::X28;
5400 if (
Reg == AArch64::FP) {
5405 if (
Last == AArch64::LR) {
5406 RangeEnd = AArch64::X28;
5407 if (
Reg == AArch64::FP) {
5410 }
 else if (
Reg == AArch64::LR) {
 // Out-of-range diagnostic (message completion lost in extraction).
5418 Twine(
"expected register in range ") +
// Returns whether two parsed operands name "equal" registers for tied-operand
// matching. Vector lists compare count/start/stride; scalar registers honor
// the RegConstraintEqualityTy recorded at parse time (EqualsReg, or the
// 32/64-bit sub/super-register relations). NOTE(review): the actual
// sub/super-register comparisons after each EqualsSuperReg/EqualsSubReg test
// fell into extraction gaps.
5426bool AArch64AsmParser::areEqualRegs(
 const MCParsedAsmOperand &Op1,
5427 const MCParsedAsmOperand &Op2)
 const {
5428 auto &AOp1 =
static_cast<const AArch64Operand&
>(Op1);
5429 auto &AOp2 =
static_cast<const AArch64Operand&
>(Op2);
 // Vector lists are equal iff all three list parameters match.
5431 if (AOp1.isVectorList() && AOp2.isVectorList())
5432 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5433 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5434 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5436 if (!AOp1.isReg() || !AOp2.isReg())
 // Exact-equality constraint on both sides: plain register comparison.
5439 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5440 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
 // The sub/super-register relations below are only defined for scalars.
5443 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5444 "Testing equality of non-scalar registers not supported");
 // One side may require the other to be its W<->X sub/super register.
5447 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5449 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5451 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5453 if (AOp2.getRegEqualityTy() == EqualsSubReg)
// NOTE(review): fragmentary extraction -- loop headers, several conditionals
// and the operand-parsing loop structure are missing; comments cover only the
// visible statements.
//
// Top-level instruction parse: canonicalizes mnemonics, handles the ".req"
// directive and SYS-alias pseudo-mnemonics, splits off a ".cond" suffix, then
// parses operands (flagging which positions are condition codes).
5460bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5461 StringRef Name, SMLoc NameLoc,
 // Legacy one-token conditional-branch spellings map to "b.<cc>".
5463 Name = StringSwitch<StringRef>(
Name.lower())
5464 .Case(
"beq",
"b.eq")
5465 .Case(
"bne",
"b.ne")
5466 .Case(
"bhs",
"b.hs")
5467 .Case(
"bcs",
"b.cs")
5468 .Case(
"blo",
"b.lo")
5469 .Case(
"bcc",
"b.cc")
5470 .Case(
"bmi",
"b.mi")
5471 .Case(
"bpl",
"b.pl")
5472 .Case(
"bvs",
"b.vs")
5473 .Case(
"bvc",
"b.vc")
5474 .Case(
"bhi",
"b.hi")
5475 .Case(
"bls",
"b.ls")
5476 .Case(
"bge",
"b.ge")
5477 .Case(
"blt",
"b.lt")
5478 .Case(
"bgt",
"b.gt")
5479 .Case(
"ble",
"b.le")
5480 .Case(
"bal",
"b.al")
5481 .Case(
"bnv",
"b.nv")
 // "<symbol> .req <reg>" register-alias directive, handled inline here.
5486 getTok().getIdentifier().lower() ==
".req") {
5487 parseDirectiveReq(Name, NameLoc);
 // Head = mnemonic up to the first '.' (suffix split points from missing code).
5495 StringRef Head =
Name.slice(Start,
Next);
 // SYS-alias mnemonics get dedicated parsers instead of table matching.
5499 if (Head ==
"ic" || Head ==
"dc" || Head ==
"at" || Head ==
"tlbi" ||
5500 Head ==
"cfp" || Head ==
"dvp" || Head ==
"cpp" || Head ==
"cosp" ||
5501 Head ==
"plbi" || Head ==
"gic" || Head ==
"gsb")
5502 return parseSysAlias(Head, NameLoc, Operands);
5506 return parseSyslAlias(Head, NameLoc, Operands);
5509 if (Head ==
"tlbip")
5510 return parseSyspAlias(Head, NameLoc, Operands);
 // Condition-code suffix after the '.', e.g. the "eq" of "b.eq".
5519 Head =
Name.slice(Start + 1,
Next);
5523 std::string Suggestion;
 // Unknown suffix: diagnose, with a spell-check suggestion when available.
5526 std::string Msg =
"invalid condition code";
5527 if (!Suggestion.empty())
5528 Msg +=
", did you mean " + Suggestion +
"?";
5529 return Error(SuffixLoc, Msg);
5534 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc,
 getContext()));
 // The mnemonic itself becomes the first (token) operand.
5544 Operands.
push_back(AArch64Operand::CreateToken(
 // These mnemonics carry a condition code in a fixed operand position;
 // parseOperand is told which position so it parses a cond code there.
5550 bool condCodeFourthOperand =
5551 (Head ==
"ccmp" || Head ==
"ccmn" || Head ==
"fccmp" ||
5552 Head ==
"fccmpe" || Head ==
"fcsel" || Head ==
"csel" ||
5553 Head ==
"csinc" || Head ==
"csinv" || Head ==
"csneg");
 // cset/csetm and cinc/cinv/cneg are aliases whose cond code must be
 // encoded inverted, hence the invertCondCode argument below.
5561 bool condCodeSecondOperand = (Head ==
"cset" || Head ==
"csetm");
5562 bool condCodeThirdOperand =
5563 (Head ==
"cinc" || Head ==
"cinv" || Head ==
"cneg");
5571 if (parseOperand(Operands, (
N == 4 && condCodeFourthOperand) ||
5572 (
N == 3 && condCodeThirdOperand) ||
5573 (
N == 2 && condCodeSecondOperand),
5574 condCodeSecondOperand || condCodeThirdOperand)) {
 // Trailing tokens closing an operand: ']', writeback '!', and '}'.
5594 AArch64Operand::CreateToken(
"]", getLoc(),
 getContext()));
5597 AArch64Operand::CreateToken(
"!", getLoc(),
 getContext()));
5600 AArch64Operand::CreateToken(
"}", getLoc(),
 getContext()));
// NOTE(review): the enclosing function's header line fell into an extraction
// gap -- presumably a static helper asking whether register Reg overlaps SVE
// register ZReg (TODO confirm name/signature against the full source).
// Each clause maps Reg's index within one register bank (B/H/S/D/Q/Z) onto
// the Z bank and compares against ZReg; a match in any bank means Reg aliases
// the same numbered Z register.
5613 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5614 return (ZReg == ((
Reg - AArch64::B0) + AArch64::Z0)) ||
5615 (ZReg == ((
Reg - AArch64::H0) + AArch64::Z0)) ||
5616 (ZReg == ((
Reg - AArch64::S0) + AArch64::Z0)) ||
5617 (ZReg == ((
Reg - AArch64::D0) + AArch64::Z0)) ||
5618 (ZReg == ((
Reg - AArch64::Q0) + AArch64::Z0)) ||
5619 (ZReg == ((
Reg - AArch64::Z0) + AArch64::Z0));
// NOTE(review): fragmentary extraction -- the switch head, many condition
// expressions, and the operand reads feeding each Error() are missing;
// comments describe only the visible structure.
//
// Post-match semantic validation of an instruction: checks movprfx pairing
// rules, ARM64EC reserved registers, and per-opcode unpredictable/invalid
// operand combinations (LDP/STP writeback, exclusive stores, MOPS CPY/SET
// register distinctness, symbolic add/sub immediates).
5631bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5632 SmallVectorImpl<SMLoc> &Loc) {
5633 const MCRegisterInfo *RI =
 getContext().getRegisterInfo();
5634 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
 // Consume the prefix state set by a preceding movprfx (if any) and record
 // whether *this* instruction is itself a movprfx for the next one.
5640 PrefixInfo
Prefix = NextPrefix;
5641 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.
TSFlags);
 // A movprfx may only be followed by a destructive instruction that writes
 // the same destination and does not read it as a non-destructive source.
5652 return Error(IDLoc,
"instruction is unpredictable when following a"
5653 " movprfx, suggest replacing movprfx with mov");
5657 return Error(Loc[0],
"instruction is unpredictable when following a"
5658 " movprfx writing to a different destination");
5665 return Error(Loc[0],
"instruction is unpredictable when following a"
5666 " movprfx and destination also used as non-destructive"
 // Predicated movprfx adds constraints: the consumer must be predicated,
 // with the same governing predicate and element size.
5670 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5671 if (
Prefix.isPredicated()) {
5685 return Error(IDLoc,
"instruction is unpredictable when following a"
5686 " predicated movprfx, suggest using unpredicated movprfx");
5690 return Error(IDLoc,
"instruction is unpredictable when following a"
5691 " predicated movprfx using a different general predicate");
5695 return Error(IDLoc,
"instruction is unpredictable when following a"
5696 " predicated movprfx with a different element size");
 // ARM64EC reserves x13/x14/x23/x24/x28 and the upper half of the SIMD
 // file (v16-v31, in every width) -- reject any use of them.
5702 if (IsWindowsArm64EC) {
5708 if ((
Reg == AArch64::W13 ||
Reg == AArch64::X13) ||
5709 (
Reg == AArch64::W14 ||
Reg == AArch64::X14) ||
5710 (
Reg == AArch64::W23 ||
Reg == AArch64::X23) ||
5711 (
Reg == AArch64::W24 ||
Reg == AArch64::X24) ||
5712 (
Reg == AArch64::W28 ||
Reg == AArch64::X28) ||
5713 (
Reg >= AArch64::Q16 &&
Reg <= AArch64::Q31) ||
5714 (
Reg >= AArch64::D16 &&
Reg <= AArch64::D31) ||
5715 (
Reg >= AArch64::S16 &&
Reg <= AArch64::S31) ||
5716 (
Reg >= AArch64::H16 &&
Reg <= AArch64::H31) ||
5717 (
Reg >= AArch64::B16 &&
Reg <= AArch64::B31)) {
5719 " is disallowed on ARM64EC.");
 // Per-opcode checks (switch head lost in extraction).
 // Pre/post-indexed LDP: the writeback base must not also be a destination.
5729 case AArch64::LDPSWpre:
5730 case AArch64::LDPWpost:
5731 case AArch64::LDPWpre:
5732 case AArch64::LDPXpost:
5733 case AArch64::LDPXpre: {
5738 return Error(Loc[0],
"unpredictable LDP instruction, writeback base "
5739 "is also a destination");
5741 return Error(Loc[1],
"unpredictable LDP instruction, writeback base "
5742 "is also a destination");
 // ZA spill/fill: the immediate must match the vector-select offset.
5745 case AArch64::LDR_ZA:
5746 case AArch64::STR_ZA: {
5749 return Error(Loc[1],
5750 "unpredictable instruction, immediate and offset mismatch.");
 // Any LDP: the two destination registers must differ.
5753 case AArch64::LDPDi:
5754 case AArch64::LDPQi:
5755 case AArch64::LDPSi:
5756 case AArch64::LDPSWi:
5757 case AArch64::LDPWi:
5758 case AArch64::LDPXi: {
5762 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
5765 case AArch64::LDPDpost:
5766 case AArch64::LDPDpre:
5767 case AArch64::LDPQpost:
5768 case AArch64::LDPQpre:
5769 case AArch64::LDPSpost:
5770 case AArch64::LDPSpre:
5771 case AArch64::LDPSWpost: {
5775 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
 // Pre/post-indexed STP: the writeback base must not also be a source.
5778 case AArch64::STPDpost:
5779 case AArch64::STPDpre:
5780 case AArch64::STPQpost:
5781 case AArch64::STPQpre:
5782 case AArch64::STPSpost:
5783 case AArch64::STPSpre:
5784 case AArch64::STPWpost:
5785 case AArch64::STPWpre:
5786 case AArch64::STPXpost:
5787 case AArch64::STPXpre: {
5792 return Error(Loc[0],
"unpredictable STP instruction, writeback base "
5793 "is also a source");
5795 return Error(Loc[1],
"unpredictable STP instruction, writeback base "
5796 "is also a source");
 // Pre/post-indexed single-register loads: base must not equal Rt.
5799 case AArch64::LDRBBpre:
5800 case AArch64::LDRBpre:
5801 case AArch64::LDRHHpre:
5802 case AArch64::LDRHpre:
5803 case AArch64::LDRSBWpre:
5804 case AArch64::LDRSBXpre:
5805 case AArch64::LDRSHWpre:
5806 case AArch64::LDRSHXpre:
5807 case AArch64::LDRSWpre:
5808 case AArch64::LDRWpre:
5809 case AArch64::LDRXpre:
5810 case AArch64::LDRBBpost:
5811 case AArch64::LDRBpost:
5812 case AArch64::LDRHHpost:
5813 case AArch64::LDRHpost:
5814 case AArch64::LDRSBWpost:
5815 case AArch64::LDRSBXpost:
5816 case AArch64::LDRSHWpost:
5817 case AArch64::LDRSHXpost:
5818 case AArch64::LDRSWpost:
5819 case AArch64::LDRWpost:
5820 case AArch64::LDRXpost: {
5824 return Error(Loc[0],
"unpredictable LDR instruction, writeback base "
5825 "is also a source");
 // Pre/post-indexed single-register stores: base must not equal Rt.
5828 case AArch64::STRBBpost:
5829 case AArch64::STRBpost:
5830 case AArch64::STRHHpost:
5831 case AArch64::STRHpost:
5832 case AArch64::STRWpost:
5833 case AArch64::STRXpost:
5834 case AArch64::STRBBpre:
5835 case AArch64::STRBpre:
5836 case AArch64::STRHHpre:
5837 case AArch64::STRHpre:
5838 case AArch64::STRWpre:
5839 case AArch64::STRXpre: {
5843 return Error(Loc[0],
"unpredictable STR instruction, writeback base "
5844 "is also a source");
 // Exclusive stores: the status register must not also be a data source.
5847 case AArch64::STXRB:
5848 case AArch64::STXRH:
5849 case AArch64::STXRW:
5850 case AArch64::STXRX:
5851 case AArch64::STLXRB:
5852 case AArch64::STLXRH:
5853 case AArch64::STLXRW:
5854 case AArch64::STLXRX: {
5860 return Error(Loc[0],
5861 "unpredictable STXR instruction, status is also a source");
5864 case AArch64::STXPW:
5865 case AArch64::STXPX:
5866 case AArch64::STLXPW:
5867 case AArch64::STLXPX: {
5874 return Error(Loc[0],
5875 "unpredictable STXP instruction, status is also a source");
 // Authenticated loads with writeback: base must not be the destination.
5878 case AArch64::LDRABwriteback:
5879 case AArch64::LDRAAwriteback: {
5883 return Error(Loc[0],
5884 "unpredictable LDRA instruction, writeback base"
5885 " is also a destination");
 // MOPS CPY family: destination, source, and size registers must all be
 // pairwise distinct.
5892 case AArch64::CPYFP:
5893 case AArch64::CPYFPWN:
5894 case AArch64::CPYFPRN:
5895 case AArch64::CPYFPN:
5896 case AArch64::CPYFPWT:
5897 case AArch64::CPYFPWTWN:
5898 case AArch64::CPYFPWTRN:
5899 case AArch64::CPYFPWTN:
5900 case AArch64::CPYFPRT:
5901 case AArch64::CPYFPRTWN:
5902 case AArch64::CPYFPRTRN:
5903 case AArch64::CPYFPRTN:
5904 case AArch64::CPYFPT:
5905 case AArch64::CPYFPTWN:
5906 case AArch64::CPYFPTRN:
5907 case AArch64::CPYFPTN:
5908 case AArch64::CPYFM:
5909 case AArch64::CPYFMWN:
5910 case AArch64::CPYFMRN:
5911 case AArch64::CPYFMN:
5912 case AArch64::CPYFMWT:
5913 case AArch64::CPYFMWTWN:
5914 case AArch64::CPYFMWTRN:
5915 case AArch64::CPYFMWTN:
5916 case AArch64::CPYFMRT:
5917 case AArch64::CPYFMRTWN:
5918 case AArch64::CPYFMRTRN:
5919 case AArch64::CPYFMRTN:
5920 case AArch64::CPYFMT:
5921 case AArch64::CPYFMTWN:
5922 case AArch64::CPYFMTRN:
5923 case AArch64::CPYFMTN:
5924 case AArch64::CPYFE:
5925 case AArch64::CPYFEWN:
5926 case AArch64::CPYFERN:
5927 case AArch64::CPYFEN:
5928 case AArch64::CPYFEWT:
5929 case AArch64::CPYFEWTWN:
5930 case AArch64::CPYFEWTRN:
5931 case AArch64::CPYFEWTN:
5932 case AArch64::CPYFERT:
5933 case AArch64::CPYFERTWN:
5934 case AArch64::CPYFERTRN:
5935 case AArch64::CPYFERTN:
5936 case AArch64::CPYFET:
5937 case AArch64::CPYFETWN:
5938 case AArch64::CPYFETRN:
5939 case AArch64::CPYFETN:
5941 case AArch64::CPYPWN:
5942 case AArch64::CPYPRN:
5943 case AArch64::CPYPN:
5944 case AArch64::CPYPWT:
5945 case AArch64::CPYPWTWN:
5946 case AArch64::CPYPWTRN:
5947 case AArch64::CPYPWTN:
5948 case AArch64::CPYPRT:
5949 case AArch64::CPYPRTWN:
5950 case AArch64::CPYPRTRN:
5951 case AArch64::CPYPRTN:
5952 case AArch64::CPYPT:
5953 case AArch64::CPYPTWN:
5954 case AArch64::CPYPTRN:
5955 case AArch64::CPYPTN:
5957 case AArch64::CPYMWN:
5958 case AArch64::CPYMRN:
5959 case AArch64::CPYMN:
5960 case AArch64::CPYMWT:
5961 case AArch64::CPYMWTWN:
5962 case AArch64::CPYMWTRN:
5963 case AArch64::CPYMWTN:
5964 case AArch64::CPYMRT:
5965 case AArch64::CPYMRTWN:
5966 case AArch64::CPYMRTRN:
5967 case AArch64::CPYMRTN:
5968 case AArch64::CPYMT:
5969 case AArch64::CPYMTWN:
5970 case AArch64::CPYMTRN:
5971 case AArch64::CPYMTN:
5973 case AArch64::CPYEWN:
5974 case AArch64::CPYERN:
5975 case AArch64::CPYEN:
5976 case AArch64::CPYEWT:
5977 case AArch64::CPYEWTWN:
5978 case AArch64::CPYEWTRN:
5979 case AArch64::CPYEWTN:
5980 case AArch64::CPYERT:
5981 case AArch64::CPYERTWN:
5982 case AArch64::CPYERTRN:
5983 case AArch64::CPYERTN:
5984 case AArch64::CPYET:
5985 case AArch64::CPYETWN:
5986 case AArch64::CPYETRN:
5987 case AArch64::CPYETN: {
5998 return Error(Loc[0],
"invalid CPY instruction, destination and source"
5999 " registers are the same");
6001 return Error(Loc[0],
"invalid CPY instruction, destination and size"
6002 " registers are the same");
6004 return Error(Loc[0],
"invalid CPY instruction, source and size"
6005 " registers are the same");
 // MOPS SET family: destination, source, and size must be distinct.
6009 case AArch64::SETPT:
6010 case AArch64::SETPN:
6011 case AArch64::SETPTN:
6013 case AArch64::SETMT:
6014 case AArch64::SETMN:
6015 case AArch64::SETMTN:
6017 case AArch64::SETET:
6018 case AArch64::SETEN:
6019 case AArch64::SETETN:
6020 case AArch64::SETGP:
6021 case AArch64::SETGPT:
6022 case AArch64::SETGPN:
6023 case AArch64::SETGPTN:
6024 case AArch64::SETGM:
6025 case AArch64::SETGMT:
6026 case AArch64::SETGMN:
6027 case AArch64::SETGMTN:
6028 case AArch64::MOPSSETGE:
6029 case AArch64::MOPSSETGET:
6030 case AArch64::MOPSSETGEN:
6031 case AArch64::MOPSSETGETN: {
6041 return Error(Loc[0],
"invalid SET instruction, destination and size"
6042 " registers are the same");
6044 return Error(Loc[0],
"invalid SET instruction, destination and source"
6045 " registers are the same");
6047 return Error(Loc[0],
"invalid SET instruction, source and size"
6048 " registers are the same");
 // SETGO* variants: only destination vs size is checked here.
6051 case AArch64::SETGOP:
6052 case AArch64::SETGOPT:
6053 case AArch64::SETGOPN:
6054 case AArch64::SETGOPTN:
6055 case AArch64::SETGOM:
6056 case AArch64::SETGOMT:
6057 case AArch64::SETGOMN:
6058 case AArch64::SETGOMTN:
6059 case AArch64::SETGOE:
6060 case AArch64::SETGOET:
6061 case AArch64::SETGOEN:
6062 case AArch64::SETGOETN: {
6071 return Error(Loc[0],
"invalid SET instruction, destination and size"
6072 " registers are the same");
 // ADD/SUB with a symbolic immediate: classify the relocation specifier
 // and reject expressions that cannot encode.
6081 case AArch64::ADDSWri:
6082 case AArch64::ADDSXri:
6083 case AArch64::ADDWri:
6084 case AArch64::ADDXri:
6085 case AArch64::SUBSWri:
6086 case AArch64::SUBSXri:
6087 case AArch64::SUBWri:
6088 case AArch64::SUBXri: {
6096 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6121 return Error(Loc.
back(),
"invalid immediate expression");
6134 unsigned VariantID = 0);
6136bool AArch64AsmParser::showMatchError(
SMLoc Loc,
unsigned ErrCode,
6140 case Match_InvalidTiedOperand: {
6141 auto &
Op =
static_cast<const AArch64Operand &
>(*Operands[
ErrorInfo]);
6142 if (
Op.isVectorList())
6143 return Error(
Loc,
"operand must match destination register list");
6145 assert(
Op.isReg() &&
"Unexpected operand type");
6146 switch (
Op.getRegEqualityTy()) {
6147 case RegConstraintEqualityTy::EqualsSubReg:
6148 return Error(
Loc,
"operand must be 64-bit form of destination register");
6149 case RegConstraintEqualityTy::EqualsSuperReg:
6150 return Error(
Loc,
"operand must be 32-bit form of destination register");
6151 case RegConstraintEqualityTy::EqualsReg:
6152 return Error(
Loc,
"operand must match destination register");
6156 case Match_MissingFeature:
6158 "instruction requires a CPU feature not currently enabled");
6159 case Match_InvalidOperand:
6160 return Error(Loc,
"invalid operand for instruction");
6161 case Match_InvalidSuffix:
6162 return Error(Loc,
"invalid type suffix for instruction");
6163 case Match_InvalidCondCode:
6164 return Error(Loc,
"expected AArch64 condition code");
6165 case Match_AddSubRegExtendSmall:
6167 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6168 case Match_AddSubRegExtendLarge:
6170 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6171 case Match_AddSubSecondSource:
6173 "expected compatible register, symbol or integer in range [0, 4095]");
6174 case Match_LogicalSecondSource:
6175 return Error(Loc,
"expected compatible register or logical immediate");
6176 case Match_InvalidMovImm32Shift:
6177 return Error(Loc,
"expected 'lsl' with optional integer 0 or 16");
6178 case Match_InvalidMovImm64Shift:
6179 return Error(Loc,
"expected 'lsl' with optional integer 0, 16, 32 or 48");
6180 case Match_AddSubRegShift32:
6182 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6183 case Match_AddSubRegShift64:
6185 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6186 case Match_InvalidFPImm:
6188 "expected compatible register or floating-point constant");
6189 case Match_InvalidMemoryIndexedSImm6:
6190 return Error(Loc,
"index must be an integer in range [-32, 31].");
6191 case Match_InvalidMemoryIndexedSImm5:
6192 return Error(Loc,
"index must be an integer in range [-16, 15].");
6193 case Match_InvalidMemoryIndexed1SImm4:
6194 return Error(Loc,
"index must be an integer in range [-8, 7].");
6195 case Match_InvalidMemoryIndexed2SImm4:
6196 return Error(Loc,
"index must be a multiple of 2 in range [-16, 14].");
6197 case Match_InvalidMemoryIndexed3SImm4:
6198 return Error(Loc,
"index must be a multiple of 3 in range [-24, 21].");
6199 case Match_InvalidMemoryIndexed4SImm4:
6200 return Error(Loc,
"index must be a multiple of 4 in range [-32, 28].");
6201 case Match_InvalidMemoryIndexed16SImm4:
6202 return Error(Loc,
"index must be a multiple of 16 in range [-128, 112].");
6203 case Match_InvalidMemoryIndexed32SImm4:
6204 return Error(Loc,
"index must be a multiple of 32 in range [-256, 224].");
6205 case Match_InvalidMemoryIndexed1SImm6:
6206 return Error(Loc,
"index must be an integer in range [-32, 31].");
6207 case Match_InvalidMemoryIndexedSImm8:
6208 return Error(Loc,
"index must be an integer in range [-128, 127].");
6209 case Match_InvalidMemoryIndexedSImm9:
6210 return Error(Loc,
"index must be an integer in range [-256, 255].");
6211 case Match_InvalidMemoryIndexed16SImm9:
6212 return Error(Loc,
"index must be a multiple of 16 in range [-4096, 4080].");
6213 case Match_InvalidMemoryIndexed8SImm10:
6214 return Error(Loc,
"index must be a multiple of 8 in range [-4096, 4088].");
6215 case Match_InvalidMemoryIndexed4SImm7:
6216 return Error(Loc,
"index must be a multiple of 4 in range [-256, 252].");
6217 case Match_InvalidMemoryIndexed8SImm7:
6218 return Error(Loc,
"index must be a multiple of 8 in range [-512, 504].");
6219 case Match_InvalidMemoryIndexed16SImm7:
6220 return Error(Loc,
"index must be a multiple of 16 in range [-1024, 1008].");
6221 case Match_InvalidMemoryIndexed8UImm5:
6222 return Error(Loc,
"index must be a multiple of 8 in range [0, 248].");
6223 case Match_InvalidMemoryIndexed8UImm3:
6224 return Error(Loc,
"index must be a multiple of 8 in range [0, 56].");
6225 case Match_InvalidMemoryIndexed4UImm5:
6226 return Error(Loc,
"index must be a multiple of 4 in range [0, 124].");
6227 case Match_InvalidMemoryIndexed2UImm5:
6228 return Error(Loc,
"index must be a multiple of 2 in range [0, 62].");
6229 case Match_InvalidMemoryIndexed8UImm6:
6230 return Error(Loc,
"index must be a multiple of 8 in range [0, 504].");
6231 case Match_InvalidMemoryIndexed16UImm6:
6232 return Error(Loc,
"index must be a multiple of 16 in range [0, 1008].");
6233 case Match_InvalidMemoryIndexed4UImm6:
6234 return Error(Loc,
"index must be a multiple of 4 in range [0, 252].");
6235 case Match_InvalidMemoryIndexed2UImm6:
6236 return Error(Loc,
"index must be a multiple of 2 in range [0, 126].");
6237 case Match_InvalidMemoryIndexed1UImm6:
6238 return Error(Loc,
"index must be in range [0, 63].");
6239 case Match_InvalidMemoryWExtend8:
6241 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6242 case Match_InvalidMemoryWExtend16:
6244 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6245 case Match_InvalidMemoryWExtend32:
6247 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6248 case Match_InvalidMemoryWExtend64:
6250 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6251 case Match_InvalidMemoryWExtend128:
6253 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6254 case Match_InvalidMemoryXExtend8:
6256 "expected 'lsl' or 'sxtx' with optional shift of #0");
6257 case Match_InvalidMemoryXExtend16:
6259 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6260 case Match_InvalidMemoryXExtend32:
6262 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6263 case Match_InvalidMemoryXExtend64:
6265 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6266 case Match_InvalidMemoryXExtend128:
6268 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6269 case Match_InvalidMemoryIndexed1:
6270 return Error(Loc,
"index must be an integer in range [0, 4095].");
6271 case Match_InvalidMemoryIndexed2:
6272 return Error(Loc,
"index must be a multiple of 2 in range [0, 8190].");
6273 case Match_InvalidMemoryIndexed4:
6274 return Error(Loc,
"index must be a multiple of 4 in range [0, 16380].");
6275 case Match_InvalidMemoryIndexed8:
6276 return Error(Loc,
"index must be a multiple of 8 in range [0, 32760].");
6277 case Match_InvalidMemoryIndexed16:
6278 return Error(Loc,
"index must be a multiple of 16 in range [0, 65520].");
6279 case Match_InvalidImm0_0:
6280 return Error(Loc,
"immediate must be 0.");
6281 case Match_InvalidImm0_1:
6282 return Error(Loc,
"immediate must be an integer in range [0, 1].");
6283 case Match_InvalidImm0_3:
6284 return Error(Loc,
"immediate must be an integer in range [0, 3].");
6285 case Match_InvalidImm0_7:
6286 return Error(Loc,
"immediate must be an integer in range [0, 7].");
6287 case Match_InvalidImm0_15:
6288 return Error(Loc,
"immediate must be an integer in range [0, 15].");
6289 case Match_InvalidImm0_31:
6290 return Error(Loc,
"immediate must be an integer in range [0, 31].");
6291 case Match_InvalidImm0_63:
6292 return Error(Loc,
"immediate must be an integer in range [0, 63].");
6293 case Match_InvalidImm0_127:
6294 return Error(Loc,
"immediate must be an integer in range [0, 127].");
6295 case Match_InvalidImm0_255:
6296 return Error(Loc,
"immediate must be an integer in range [0, 255].");
6297 case Match_InvalidImm0_65535:
6298 return Error(Loc,
"immediate must be an integer in range [0, 65535].");
6299 case Match_InvalidImm1_8:
6300 return Error(Loc,
"immediate must be an integer in range [1, 8].");
6301 case Match_InvalidImm1_16:
6302 return Error(Loc,
"immediate must be an integer in range [1, 16].");
6303 case Match_InvalidImm1_32:
6304 return Error(Loc,
"immediate must be an integer in range [1, 32].");
6305 case Match_InvalidImm1_64:
6306 return Error(Loc,
"immediate must be an integer in range [1, 64].");
6307 case Match_InvalidImmM1_62:
6308 return Error(Loc,
"immediate must be an integer in range [-1, 62].");
6309 case Match_InvalidMemoryIndexedRange2UImm0:
6310 return Error(Loc,
"vector select offset must be the immediate range 0:1.");
6311 case Match_InvalidMemoryIndexedRange2UImm1:
6312 return Error(Loc,
"vector select offset must be an immediate range of the "
6313 "form <immf>:<imml>, where the first "
6314 "immediate is a multiple of 2 in the range [0, 2], and "
6315 "the second immediate is immf + 1.");
6316 case Match_InvalidMemoryIndexedRange2UImm2:
6317 case Match_InvalidMemoryIndexedRange2UImm3:
6320 "vector select offset must be an immediate range of the form "
6322 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6324 "depending on the instruction, and the second immediate is immf + 1.");
6325 case Match_InvalidMemoryIndexedRange4UImm0:
6326 return Error(Loc,
"vector select offset must be the immediate range 0:3.");
6327 case Match_InvalidMemoryIndexedRange4UImm1:
6328 case Match_InvalidMemoryIndexedRange4UImm2:
6331 "vector select offset must be an immediate range of the form "
6333 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6335 "depending on the instruction, and the second immediate is immf + 3.");
6336 case Match_InvalidSVEAddSubImm8:
6337 return Error(Loc,
"immediate must be an integer in range [0, 255]"
6338 " with a shift amount of 0");
6339 case Match_InvalidSVEAddSubImm16:
6340 case Match_InvalidSVEAddSubImm32:
6341 case Match_InvalidSVEAddSubImm64:
6342 return Error(Loc,
"immediate must be an integer in range [0, 255] or a "
6343 "multiple of 256 in range [256, 65280]");
6344 case Match_InvalidSVECpyImm8:
6345 return Error(Loc,
"immediate must be an integer in range [-128, 255]"
6346 " with a shift amount of 0");
6347 case Match_InvalidSVECpyImm16:
6348 return Error(Loc,
"immediate must be an integer in range [-128, 127] or a "
6349 "multiple of 256 in range [-32768, 65280]");
6350 case Match_InvalidSVECpyImm32:
6351 case Match_InvalidSVECpyImm64:
6352 return Error(Loc,
"immediate must be an integer in range [-128, 127] or a "
6353 "multiple of 256 in range [-32768, 32512]");
6354 case Match_InvalidIndexRange0_0:
6355 return Error(Loc,
"expected lane specifier '[0]'");
6356 case Match_InvalidIndexRange1_1:
6357 return Error(Loc,
"expected lane specifier '[1]'");
6358 case Match_InvalidIndexRange0_15:
6359 return Error(Loc,
"vector lane must be an integer in range [0, 15].");
6360 case Match_InvalidIndexRange0_7:
6361 return Error(Loc,
"vector lane must be an integer in range [0, 7].");
6362 case Match_InvalidIndexRange0_3:
6363 return Error(Loc,
"vector lane must be an integer in range [0, 3].");
6364 case Match_InvalidIndexRange0_1:
6365 return Error(Loc,
"vector lane must be an integer in range [0, 1].");
6366 case Match_InvalidSVEIndexRange0_63:
6367 return Error(Loc,
"vector lane must be an integer in range [0, 63].");
6368 case Match_InvalidSVEIndexRange0_31:
6369 return Error(Loc,
"vector lane must be an integer in range [0, 31].");
6370 case Match_InvalidSVEIndexRange0_15:
6371 return Error(Loc,
"vector lane must be an integer in range [0, 15].");
6372 case Match_InvalidSVEIndexRange0_7:
6373 return Error(Loc,
"vector lane must be an integer in range [0, 7].");
6374 case Match_InvalidSVEIndexRange0_3:
6375 return Error(Loc,
"vector lane must be an integer in range [0, 3].");
6376 case Match_InvalidLabel:
6377 return Error(Loc,
"expected label or encodable integer pc offset");
6379 return Error(Loc,
"expected readable system register");
6381 case Match_InvalidSVCR:
6382 return Error(Loc,
"expected writable system register or pstate");
6383 case Match_InvalidComplexRotationEven:
6384 return Error(Loc,
"complex rotation must be 0, 90, 180 or 270.");
6385 case Match_InvalidComplexRotationOdd:
6386 return Error(Loc,
"complex rotation must be 90 or 270.");
6387 case Match_MnemonicFail: {
6389 ((AArch64Operand &)*Operands[0]).
getToken(),
6390 ComputeAvailableFeatures(STI->getFeatureBits()));
6391 return Error(Loc,
"unrecognized instruction mnemonic" + Suggestion);
6393 case Match_InvalidGPR64shifted8:
6394 return Error(Loc,
"register must be x0..x30 or xzr, without shift");
6395 case Match_InvalidGPR64shifted16:
6396 return Error(Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #1'");
6397 case Match_InvalidGPR64shifted32:
6398 return Error(Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #2'");
6399 case Match_InvalidGPR64shifted64:
6400 return Error(Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #3'");
6401 case Match_InvalidGPR64shifted128:
6403 Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #4'");
6404 case Match_InvalidGPR64NoXZRshifted8:
6405 return Error(Loc,
"register must be x0..x30 without shift");
6406 case Match_InvalidGPR64NoXZRshifted16:
6407 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #1'");
6408 case Match_InvalidGPR64NoXZRshifted32:
6409 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #2'");
6410 case Match_InvalidGPR64NoXZRshifted64:
6411 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #3'");
6412 case Match_InvalidGPR64NoXZRshifted128:
6413 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #4'");
6414 case Match_InvalidZPR32UXTW8:
6415 case Match_InvalidZPR32SXTW8:
6416 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6417 case Match_InvalidZPR32UXTW16:
6418 case Match_InvalidZPR32SXTW16:
6419 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6420 case Match_InvalidZPR32UXTW32:
6421 case Match_InvalidZPR32SXTW32:
6422 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6423 case Match_InvalidZPR32UXTW64:
6424 case Match_InvalidZPR32SXTW64:
6425 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6426 case Match_InvalidZPR64UXTW8:
6427 case Match_InvalidZPR64SXTW8:
6428 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6429 case Match_InvalidZPR64UXTW16:
6430 case Match_InvalidZPR64SXTW16:
6431 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6432 case Match_InvalidZPR64UXTW32:
6433 case Match_InvalidZPR64SXTW32:
6434 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6435 case Match_InvalidZPR64UXTW64:
6436 case Match_InvalidZPR64SXTW64:
6437 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6438 case Match_InvalidZPR32LSL8:
6439 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s'");
6440 case Match_InvalidZPR32LSL16:
6441 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6442 case Match_InvalidZPR32LSL32:
6443 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6444 case Match_InvalidZPR32LSL64:
6445 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6446 case Match_InvalidZPR64LSL8:
6447 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d'");
6448 case Match_InvalidZPR64LSL16:
6449 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6450 case Match_InvalidZPR64LSL32:
6451 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6452 case Match_InvalidZPR64LSL64:
6453 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6454 case Match_InvalidZPR0:
6455 return Error(Loc,
"expected register without element width suffix");
6456 case Match_InvalidZPR8:
6457 case Match_InvalidZPR16:
6458 case Match_InvalidZPR32:
6459 case Match_InvalidZPR64:
6460 case Match_InvalidZPR128:
6461 return Error(Loc,
"invalid element width");
6462 case Match_InvalidZPR_3b8:
6463 return Error(Loc,
"Invalid restricted vector register, expected z0.b..z7.b");
6464 case Match_InvalidZPR_3b16:
6465 return Error(Loc,
"Invalid restricted vector register, expected z0.h..z7.h");
6466 case Match_InvalidZPR_3b32:
6467 return Error(Loc,
"Invalid restricted vector register, expected z0.s..z7.s");
6468 case Match_InvalidZPR_4b8:
6470 "Invalid restricted vector register, expected z0.b..z15.b");
6471 case Match_InvalidZPR_4b16:
6472 return Error(Loc,
"Invalid restricted vector register, expected z0.h..z15.h");
6473 case Match_InvalidZPR_4b32:
6474 return Error(Loc,
"Invalid restricted vector register, expected z0.s..z15.s");
6475 case Match_InvalidZPR_4b64:
6476 return Error(Loc,
"Invalid restricted vector register, expected z0.d..z15.d");
6477 case Match_InvalidZPRMul2_Lo8:
6478 return Error(Loc,
"Invalid restricted vector register, expected even "
6479 "register in z0.b..z14.b");
6480 case Match_InvalidZPRMul2_Hi8:
6481 return Error(Loc,
"Invalid restricted vector register, expected even "
6482 "register in z16.b..z30.b");
6483 case Match_InvalidZPRMul2_Lo16:
6484 return Error(Loc,
"Invalid restricted vector register, expected even "
6485 "register in z0.h..z14.h");
6486 case Match_InvalidZPRMul2_Hi16:
6487 return Error(Loc,
"Invalid restricted vector register, expected even "
6488 "register in z16.h..z30.h");
6489 case Match_InvalidZPRMul2_Lo32:
6490 return Error(Loc,
"Invalid restricted vector register, expected even "
6491 "register in z0.s..z14.s");
6492 case Match_InvalidZPRMul2_Hi32:
6493 return Error(Loc,
"Invalid restricted vector register, expected even "
6494 "register in z16.s..z30.s");
6495 case Match_InvalidZPRMul2_Lo64:
6496 return Error(Loc,
"Invalid restricted vector register, expected even "
6497 "register in z0.d..z14.d");
6498 case Match_InvalidZPRMul2_Hi64:
6499 return Error(Loc,
"Invalid restricted vector register, expected even "
6500 "register in z16.d..z30.d");
6501 case Match_InvalidZPR_K0:
6502 return Error(Loc,
"invalid restricted vector register, expected register "
6503 "in z20..z23 or z28..z31");
6504 case Match_InvalidSVEPattern:
6505 return Error(Loc,
"invalid predicate pattern");
6506 case Match_InvalidSVEPPRorPNRAnyReg:
6507 case Match_InvalidSVEPPRorPNRBReg:
6508 case Match_InvalidSVEPredicateAnyReg:
6509 case Match_InvalidSVEPredicateBReg:
6510 case Match_InvalidSVEPredicateHReg:
6511 case Match_InvalidSVEPredicateSReg:
6512 case Match_InvalidSVEPredicateDReg:
6513 return Error(Loc,
"invalid predicate register.");
6514 case Match_InvalidSVEPredicate3bAnyReg:
6515 return Error(Loc,
"invalid restricted predicate register, expected p0..p7 (without element suffix)");
6516 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6517 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6518 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6519 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6520 return Error(Loc,
"Invalid predicate register, expected PN in range "
6521 "pn8..pn15 with element suffix.");
6522 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6523 return Error(Loc,
"invalid restricted predicate-as-counter register "
6524 "expected pn8..pn15");
6525 case Match_InvalidSVEPNPredicateBReg:
6526 case Match_InvalidSVEPNPredicateHReg:
6527 case Match_InvalidSVEPNPredicateSReg:
6528 case Match_InvalidSVEPNPredicateDReg:
6529 return Error(Loc,
"Invalid predicate register, expected PN in range "
6530 "pn0..pn15 with element suffix.");
6531 case Match_InvalidSVEVecLenSpecifier:
6532 return Error(Loc,
"Invalid vector length specifier, expected VLx2 or VLx4");
6533 case Match_InvalidSVEPredicateListMul2x8:
6534 case Match_InvalidSVEPredicateListMul2x16:
6535 case Match_InvalidSVEPredicateListMul2x32:
6536 case Match_InvalidSVEPredicateListMul2x64:
6537 return Error(Loc,
"Invalid vector list, expected list with 2 consecutive "
6538 "predicate registers, where the first vector is a multiple of 2 "
6539 "and with correct element type");
6540 case Match_InvalidSVEExactFPImmOperandHalfOne:
6541 return Error(Loc,
"Invalid floating point constant, expected 0.5 or 1.0.");
6542 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6543 return Error(Loc,
"Invalid floating point constant, expected 0.5 or 2.0.");
6544 case Match_InvalidSVEExactFPImmOperandZeroOne:
6545 return Error(Loc,
"Invalid floating point constant, expected 0.0 or 1.0.");
6546 case Match_InvalidMatrixTileVectorH8:
6547 case Match_InvalidMatrixTileVectorV8:
6548 return Error(Loc,
"invalid matrix operand, expected za0h.b or za0v.b");
6549 case Match_InvalidMatrixTileVectorH16:
6550 case Match_InvalidMatrixTileVectorV16:
6552 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6553 case Match_InvalidMatrixTileVectorH32:
6554 case Match_InvalidMatrixTileVectorV32:
6556 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6557 case Match_InvalidMatrixTileVectorH64:
6558 case Match_InvalidMatrixTileVectorV64:
6560 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6561 case Match_InvalidMatrixTileVectorH128:
6562 case Match_InvalidMatrixTileVectorV128:
6564 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6565 case Match_InvalidMatrixTile16:
6566 return Error(Loc,
"invalid matrix operand, expected za[0-1].h");
6567 case Match_InvalidMatrixTile32:
6568 return Error(Loc,
"invalid matrix operand, expected za[0-3].s");
6569 case Match_InvalidMatrixTile64:
6570 return Error(Loc,
"invalid matrix operand, expected za[0-7].d");
6571 case Match_InvalidMatrix:
6572 return Error(Loc,
"invalid matrix operand, expected za");
6573 case Match_InvalidMatrix8:
6574 return Error(Loc,
"invalid matrix operand, expected suffix .b");
6575 case Match_InvalidMatrix16:
6576 return Error(Loc,
"invalid matrix operand, expected suffix .h");
6577 case Match_InvalidMatrix32:
6578 return Error(Loc,
"invalid matrix operand, expected suffix .s");
6579 case Match_InvalidMatrix64:
6580 return Error(Loc,
"invalid matrix operand, expected suffix .d");
6581 case Match_InvalidMatrixIndexGPR32_12_15:
6582 return Error(Loc,
"operand must be a register in range [w12, w15]");
6583 case Match_InvalidMatrixIndexGPR32_8_11:
6584 return Error(Loc,
"operand must be a register in range [w8, w11]");
6585 case Match_InvalidSVEVectorList2x8Mul2:
6586 case Match_InvalidSVEVectorList2x16Mul2:
6587 case Match_InvalidSVEVectorList2x32Mul2:
6588 case Match_InvalidSVEVectorList2x64Mul2:
6589 case Match_InvalidSVEVectorList2x128Mul2:
6590 return Error(Loc,
"Invalid vector list, expected list with 2 consecutive "
6591 "SVE vectors, where the first vector is a multiple of 2 "
6592 "and with matching element types");
6593 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6594 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6595 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6596 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6597 return Error(Loc,
"Invalid vector list, expected list with 2 consecutive "
6598 "SVE vectors in the range z0-z14, where the first vector "
6599 "is a multiple of 2 "
6600 "and with matching element types");
6601 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6602 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6603 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6604 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6606 "Invalid vector list, expected list with 2 consecutive "
6607 "SVE vectors in the range z16-z30, where the first vector "
6608 "is a multiple of 2 "
6609 "and with matching element types");
6610 case Match_InvalidSVEVectorList4x8Mul4:
6611 case Match_InvalidSVEVectorList4x16Mul4:
6612 case Match_InvalidSVEVectorList4x32Mul4:
6613 case Match_InvalidSVEVectorList4x64Mul4:
6614 case Match_InvalidSVEVectorList4x128Mul4:
6615 return Error(Loc,
"Invalid vector list, expected list with 4 consecutive "
6616 "SVE vectors, where the first vector is a multiple of 4 "
6617 "and with matching element types");
6618 case Match_InvalidLookupTable:
6619 return Error(Loc,
"Invalid lookup table, expected zt0");
6620 case Match_InvalidSVEVectorListStrided2x8:
6621 case Match_InvalidSVEVectorListStrided2x16:
6622 case Match_InvalidSVEVectorListStrided2x32:
6623 case Match_InvalidSVEVectorListStrided2x64:
6626 "Invalid vector list, expected list with each SVE vector in the list "
6627 "8 registers apart, and the first register in the range [z0, z7] or "
6628 "[z16, z23] and with correct element type");
6629 case Match_InvalidSVEVectorListStrided4x8:
6630 case Match_InvalidSVEVectorListStrided4x16:
6631 case Match_InvalidSVEVectorListStrided4x32:
6632 case Match_InvalidSVEVectorListStrided4x64:
6635 "Invalid vector list, expected list with each SVE vector in the list "
6636 "4 registers apart, and the first register in the range [z0, z3] or "
6637 "[z16, z19] and with correct element type");
6638 case Match_AddSubLSLImm3ShiftLarge:
6640 "expected 'lsl' with optional integer in range [0, 7]");
// NOTE(review): This text is a lossy extraction of the original source —
// original line numbers are fused onto the code (e.g. "6648bool") and the
// jumps in that numbering (6685->6689, 6796->6809, 7243->7246, ...) show
// that whole statements/braces were dropped. Code left byte-identical;
// comments only. Restore from the canonical source before building.
//
// matchAndEmitInstruction: top-level match/emit entry point for one parsed
// AArch64 instruction. Visible structure: (1) rewrite assembler aliases
// (lsl/bfc/bfi/sbfiz/ubfiz/bfxil/sbfx/ubfx, movi.2d workaround,
// sxtw/uxtw/sxtb/sxth/uxtb/uxth) into their canonical *BFM/real forms by
// editing Operands in place, (2) run the table-generated matcher, then
// (3) switch on the match result to emit or report a diagnostic.
6648bool AArch64AsmParser::matchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
6652 bool MatchingInlineAsm) {
6653 assert(!Operands.
empty() &&
"Unexpected empty operand list!");
// Operands[0] is always the mnemonic token produced by the mnemonic parser.
6654 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(*Operands[0]);
6655 assert(
Op.isToken() &&
"Leading operand should always be a mnemonic!");
6658 unsigned NumOperands = Operands.
size();
// --- Alias: "lsl Rd, Rn, #imm" is rewritten to the equivalent "ubfm"
// (shift amounts depend on 32- vs 64-bit register width).
6660 if (NumOperands == 4 && Tok ==
"lsl") {
6661 AArch64Operand &Op2 =
static_cast<AArch64Operand &
>(*Operands[2]);
6662 AArch64Operand &Op3 =
static_cast<AArch64Operand &
>(*Operands[3]);
6663 if (Op2.isScalarReg() && Op3.isImm()) {
// 32-bit form: immr = (32 - shift) mod 32, imms = 31 - shift.
6669 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].
contains(
6671 NewOp3Val = (32 - Op3Val) & 0x1f;
6672 NewOp4Val = 31 - Op3Val;
// 64-bit form: immr = (64 - shift) mod 64, imms = 63 - shift.
6674 NewOp3Val = (64 - Op3Val) & 0x3f;
6675 NewOp4Val = 63 - Op3Val;
6682 AArch64Operand::CreateToken(
"ubfm",
Op.getStartLoc(),
getContext());
6683 Operands.
push_back(AArch64Operand::CreateImm(
6684 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(),
getContext()));
6685 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6689 }
// --- Alias: "bfc Rd, #lsb, #width" -> "bfm Rd, WZR/XZR, #immr, #imms".
// NOTE(review): LSBOp/WidthOp are declared by value here (no '&' after
// the static_cast) — possibly an extraction artifact; confirm against
// the canonical source.
else if (NumOperands == 4 && Tok ==
"bfc") {
6691 AArch64Operand &Op1 =
static_cast<AArch64Operand &
>(*Operands[1]);
6692 AArch64Operand LSBOp =
static_cast<AArch64Operand &
>(*Operands[2]);
6693 AArch64Operand WidthOp =
static_cast<AArch64Operand &
>(*Operands[3]);
6695 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6699 if (LSBCE && WidthCE) {
6701 uint64_t Width = WidthCE->
getValue();
6703 uint64_t RegWidth = 0;
6704 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].
contains(
// Range-check lsb/width against the destination register width.
6710 if (LSB >= RegWidth)
6711 return Error(LSBOp.getStartLoc(),
6712 "expected integer in range [0, 31]");
6713 if (Width < 1 || Width > RegWidth)
6714 return Error(WidthOp.getStartLoc(),
6715 "expected integer in range [1, 32]");
6719 ImmR = (32 - LSB) & 0x1f;
6721 ImmR = (64 - LSB) & 0x3f;
6723 uint64_t ImmS = Width - 1;
// Reject lsb+width combinations that would wrap past the register.
6725 if (ImmR != 0 && ImmS >= ImmR)
6726 return Error(WidthOp.getStartLoc(),
6727 "requested insert overflows register");
6732 AArch64Operand::CreateToken(
"bfm",
Op.getStartLoc(),
getContext());
// Insert the zero register as the bfm source operand.
6733 Operands[2] = AArch64Operand::CreateReg(
6734 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6736 Operands[3] = AArch64Operand::CreateImm(
6737 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(),
getContext());
6739 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6743 }
// --- Aliases: "bfi/sbfiz/ubfiz Rd, Rn, #lsb, #width" -> *BFM with
// immr = (RegWidth - lsb) mod RegWidth, imms = width - 1.
else if (NumOperands == 5) {
6746 if (Tok ==
"bfi" || Tok ==
"sbfiz" || Tok ==
"ubfiz") {
6747 AArch64Operand &Op1 =
static_cast<AArch64Operand &
>(*Operands[1]);
6748 AArch64Operand &Op3 =
static_cast<AArch64Operand &
>(*Operands[3]);
6749 AArch64Operand &Op4 =
static_cast<AArch64Operand &
>(*Operands[4]);
6751 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6755 if (Op3CE && Op4CE) {
6756 uint64_t Op3Val = Op3CE->
getValue();
6757 uint64_t Op4Val = Op4CE->
getValue();
6759 uint64_t RegWidth = 0;
6760 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].
contains(
6766 if (Op3Val >= RegWidth)
6767 return Error(Op3.getStartLoc(),
6768 "expected integer in range [0, 31]");
6769 if (Op4Val < 1 || Op4Val > RegWidth)
6770 return Error(Op4.getStartLoc(),
6771 "expected integer in range [1, 32]");
6773 uint64_t NewOp3Val = 0;
6775 NewOp3Val = (32 - Op3Val) & 0x1f;
6777 NewOp3Val = (64 - Op3Val) & 0x3f;
6779 uint64_t NewOp4Val = Op4Val - 1;
6781 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6782 return Error(Op4.getStartLoc(),
6783 "requested insert overflows register");
6785 const MCExpr *NewOp3 =
6787 const MCExpr *NewOp4 =
6789 Operands[3] = AArch64Operand::CreateImm(
6790 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(),
getContext());
6791 Operands[4] = AArch64Operand::CreateImm(
6792 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(),
getContext());
// Replace the mnemonic token with the canonical *BFM spelling.
6794 Operands[0] = AArch64Operand::CreateToken(
"bfm",
Op.getStartLoc(),
6796 else if (Tok ==
"sbfiz")
6797 Operands[0] = AArch64Operand::CreateToken(
"sbfm",
Op.getStartLoc(),
6799 else if (Tok ==
"ubfiz")
6800 Operands[0] = AArch64Operand::CreateToken(
"ubfm",
Op.getStartLoc(),
6809 }
// --- Aliases: "bfxil/sbfx/ubfx Rd, Rn, #lsb, #width" -> *BFM with
// imms = lsb + width - 1 (immr stays as the lsb operand).
else if (NumOperands == 5 &&
6810 (Tok ==
"bfxil" || Tok ==
"sbfx" || Tok ==
"ubfx")) {
6811 AArch64Operand &Op1 =
static_cast<AArch64Operand &
>(*Operands[1]);
6812 AArch64Operand &Op3 =
static_cast<AArch64Operand &
>(*Operands[3]);
6813 AArch64Operand &Op4 =
static_cast<AArch64Operand &
>(*Operands[4]);
6815 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6819 if (Op3CE && Op4CE) {
6820 uint64_t Op3Val = Op3CE->
getValue();
6821 uint64_t Op4Val = Op4CE->
getValue();
6823 uint64_t RegWidth = 0;
6824 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].
contains(
6830 if (Op3Val >= RegWidth)
6831 return Error(Op3.getStartLoc(),
6832 "expected integer in range [0, 31]");
6833 if (Op4Val < 1 || Op4Val > RegWidth)
6834 return Error(Op4.getStartLoc(),
6835 "expected integer in range [1, 32]");
6837 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
// The second check also catches unsigned wrap-around of lsb+width-1.
6839 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6840 return Error(Op4.getStartLoc(),
6841 "requested extract overflows register");
6843 const MCExpr *NewOp4 =
6845 Operands[4] = AArch64Operand::CreateImm(
6846 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(),
getContext());
6848 Operands[0] = AArch64Operand::CreateToken(
"bfm",
Op.getStartLoc(),
6850 else if (Tok ==
"sbfx")
6851 Operands[0] = AArch64Operand::CreateToken(
"sbfm",
Op.getStartLoc(),
6853 else if (Tok ==
"ubfx")
6854 Operands[0] = AArch64Operand::CreateToken(
"ubfm",
Op.getStartLoc(),
// --- CPU errata workaround: with FeatureZCZeroingFPWorkaround, rewrite
// "movi Vd.2d, #0" to the equivalent movi.16b form (with a warning),
// because the .2d zeroing form may misbehave on the affected CPU.
6867 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6868 NumOperands == 4 && Tok ==
"movi") {
6869 AArch64Operand &Op1 =
static_cast<AArch64Operand &
>(*Operands[1]);
6870 AArch64Operand &Op2 =
static_cast<AArch64Operand &
>(*Operands[2]);
6871 AArch64Operand &Op3 =
static_cast<AArch64Operand &
>(*Operands[3]);
// The ".2d" suffix can appear either as Operands[1] or Operands[2]
// depending on how the mnemonic was split.
6872 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6873 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6874 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6875 if (Suffix.
lower() ==
".2d" &&
6877 Warning(IDLoc,
"instruction movi.2d with immediate #0 may not function"
6878 " correctly on this CPU, converting to equivalent movi.16b");
6880 unsigned Idx = Op1.isToken() ? 1 : 2;
6882 AArch64Operand::CreateToken(
".16b", IDLoc,
getContext());
// --- Aliases: sxtw/uxtw (and below sxtb/sxth, uxtb/uxth) need their
// source GPR rewritten to the 32-bit sub-register form expected by the
// underlying *BFM encodings.
6890 if (NumOperands == 3 && (Tok ==
"sxtw" || Tok ==
"uxtw")) {
6893 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(*Operands[2]);
6894 if (
Op.isScalarReg()) {
6896 Operands[2] = AArch64Operand::CreateReg(
Reg, RegKind::Scalar,
6897 Op.getStartLoc(),
Op.getEndLoc(),
6902 else if (NumOperands == 3 && (Tok ==
"sxtb" || Tok ==
"sxth")) {
6903 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(*Operands[1]);
// Only applies when the destination is a 64-bit GPR.
6904 if (
Op.isScalarReg() &&
6905 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6909 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(*Operands[2]);
6910 if (
Op.isScalarReg()) {
6912 Operands[2] = AArch64Operand::CreateReg(
Reg, RegKind::Scalar,
6919 else if (NumOperands == 3 && (Tok ==
"uxtb" || Tok ==
"uxth")) {
6920 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(*Operands[1]);
6921 if (
Op.isScalarReg() &&
6922 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// NOTE(review): here Operands[1] (the destination) is rewritten,
// unlike the sxtb/sxth branch which rewrites Operands[2].
6926 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(*Operands[1]);
6927 if (
Op.isScalarReg()) {
6929 Operands[1] = AArch64Operand::CreateReg(
Reg, RegKind::Scalar,
// --- Run the generated matcher. First pass uses variant 1; on failure a
// second pass (variant 0) is tried below for short-form NEON syntax.
6937 FeatureBitset MissingFeatures;
6940 unsigned MatchResult =
6941 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6942 MatchingInlineAsm, 1);
6946 if (MatchResult != Match_Success) {
// Save the first-pass diagnostics so they can be restored if the
// operand layout indicates short-form NEON (suffix token at index 1).
6949 auto ShortFormNEONErrorInfo = ErrorInfo;
6950 auto ShortFormNEONMatchResult = MatchResult;
6951 auto ShortFormNEONMissingFeatures = MissingFeatures;
6954 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6955 MatchingInlineAsm, 0);
6960 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6961 Operands.
size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6962 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6963 MatchResult = ShortFormNEONMatchResult;
6964 ErrorInfo = ShortFormNEONErrorInfo;
6965 MissingFeatures = ShortFormNEONMissingFeatures;
// --- Dispatch on the final match result.
6969 switch (MatchResult) {
6970 case Match_Success: {
// Collect per-operand locations for post-match semantic validation.
6973 NumOperands = Operands.
size();
6974 for (
unsigned i = 1; i < NumOperands; ++i)
6975 OperandLocs.
push_back(Operands[i]->getStartLoc());
6976 if (validateInstruction(Inst, IDLoc, OperandLocs))
6983 case Match_MissingFeature: {
6984 assert(MissingFeatures.
any() &&
"Unknown missing feature!");
// Build an "instruction requires: ..." message listing each missing
// subtarget feature bit.
6987 std::string Msg =
"instruction requires:";
6988 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i) {
6989 if (MissingFeatures[i]) {
6994 return Error(IDLoc, Msg);
6996 case Match_MnemonicFail:
6997 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6998 case Match_InvalidOperand: {
6999 SMLoc ErrorLoc = IDLoc;
// ErrorInfo (when not ~0ULL) indexes the offending operand.
7001 if (ErrorInfo != ~0ULL) {
7002 if (ErrorInfo >= Operands.
size())
7003 return Error(IDLoc,
"too few operands for instruction",
7004 SMRange(IDLoc, getTok().getLoc()));
7006 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7007 if (ErrorLoc == SMLoc())
// A bad suffix token gets the more specific InvalidSuffix code.
7012 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7013 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7014 MatchResult = Match_InvalidSuffix;
7016 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
// All remaining operand-specific diagnostics share one handler: locate
// the offending operand and forward to showMatchError.
7018 case Match_InvalidTiedOperand:
7019 case Match_InvalidMemoryIndexed1:
7020 case Match_InvalidMemoryIndexed2:
7021 case Match_InvalidMemoryIndexed4:
7022 case Match_InvalidMemoryIndexed8:
7023 case Match_InvalidMemoryIndexed16:
7024 case Match_InvalidCondCode:
7025 case Match_AddSubLSLImm3ShiftLarge:
7026 case Match_AddSubRegExtendSmall:
7027 case Match_AddSubRegExtendLarge:
7028 case Match_AddSubSecondSource:
7029 case Match_LogicalSecondSource:
7030 case Match_AddSubRegShift32:
7031 case Match_AddSubRegShift64:
7032 case Match_InvalidMovImm32Shift:
7033 case Match_InvalidMovImm64Shift:
7034 case Match_InvalidFPImm:
7035 case Match_InvalidMemoryWExtend8:
7036 case Match_InvalidMemoryWExtend16:
7037 case Match_InvalidMemoryWExtend32:
7038 case Match_InvalidMemoryWExtend64:
7039 case Match_InvalidMemoryWExtend128:
7040 case Match_InvalidMemoryXExtend8:
7041 case Match_InvalidMemoryXExtend16:
7042 case Match_InvalidMemoryXExtend32:
7043 case Match_InvalidMemoryXExtend64:
7044 case Match_InvalidMemoryXExtend128:
7045 case Match_InvalidMemoryIndexed1SImm4:
7046 case Match_InvalidMemoryIndexed2SImm4:
7047 case Match_InvalidMemoryIndexed3SImm4:
7048 case Match_InvalidMemoryIndexed4SImm4:
7049 case Match_InvalidMemoryIndexed1SImm6:
7050 case Match_InvalidMemoryIndexed16SImm4:
7051 case Match_InvalidMemoryIndexed32SImm4:
7052 case Match_InvalidMemoryIndexed4SImm7:
7053 case Match_InvalidMemoryIndexed8SImm7:
7054 case Match_InvalidMemoryIndexed16SImm7:
7055 case Match_InvalidMemoryIndexed8UImm5:
7056 case Match_InvalidMemoryIndexed8UImm3:
7057 case Match_InvalidMemoryIndexed4UImm5:
7058 case Match_InvalidMemoryIndexed2UImm5:
7059 case Match_InvalidMemoryIndexed1UImm6:
7060 case Match_InvalidMemoryIndexed2UImm6:
7061 case Match_InvalidMemoryIndexed4UImm6:
7062 case Match_InvalidMemoryIndexed8UImm6:
7063 case Match_InvalidMemoryIndexed16UImm6:
7064 case Match_InvalidMemoryIndexedSImm6:
7065 case Match_InvalidMemoryIndexedSImm5:
7066 case Match_InvalidMemoryIndexedSImm8:
7067 case Match_InvalidMemoryIndexedSImm9:
7068 case Match_InvalidMemoryIndexed16SImm9:
7069 case Match_InvalidMemoryIndexed8SImm10:
7070 case Match_InvalidImm0_0:
7071 case Match_InvalidImm0_1:
7072 case Match_InvalidImm0_3:
7073 case Match_InvalidImm0_7:
7074 case Match_InvalidImm0_15:
7075 case Match_InvalidImm0_31:
7076 case Match_InvalidImm0_63:
7077 case Match_InvalidImm0_127:
7078 case Match_InvalidImm0_255:
7079 case Match_InvalidImm0_65535:
7080 case Match_InvalidImm1_8:
7081 case Match_InvalidImm1_16:
7082 case Match_InvalidImm1_32:
7083 case Match_InvalidImm1_64:
7084 case Match_InvalidImmM1_62:
7085 case Match_InvalidMemoryIndexedRange2UImm0:
7086 case Match_InvalidMemoryIndexedRange2UImm1:
7087 case Match_InvalidMemoryIndexedRange2UImm2:
7088 case Match_InvalidMemoryIndexedRange2UImm3:
7089 case Match_InvalidMemoryIndexedRange4UImm0:
7090 case Match_InvalidMemoryIndexedRange4UImm1:
7091 case Match_InvalidMemoryIndexedRange4UImm2:
7092 case Match_InvalidSVEAddSubImm8:
7093 case Match_InvalidSVEAddSubImm16:
7094 case Match_InvalidSVEAddSubImm32:
7095 case Match_InvalidSVEAddSubImm64:
7096 case Match_InvalidSVECpyImm8:
7097 case Match_InvalidSVECpyImm16:
7098 case Match_InvalidSVECpyImm32:
7099 case Match_InvalidSVECpyImm64:
7100 case Match_InvalidIndexRange0_0:
7101 case Match_InvalidIndexRange1_1:
7102 case Match_InvalidIndexRange0_15:
7103 case Match_InvalidIndexRange0_7:
7104 case Match_InvalidIndexRange0_3:
7105 case Match_InvalidIndexRange0_1:
7106 case Match_InvalidSVEIndexRange0_63:
7107 case Match_InvalidSVEIndexRange0_31:
7108 case Match_InvalidSVEIndexRange0_15:
7109 case Match_InvalidSVEIndexRange0_7:
7110 case Match_InvalidSVEIndexRange0_3:
7111 case Match_InvalidLabel:
7112 case Match_InvalidComplexRotationEven:
7113 case Match_InvalidComplexRotationOdd:
7114 case Match_InvalidGPR64shifted8:
7115 case Match_InvalidGPR64shifted16:
7116 case Match_InvalidGPR64shifted32:
7117 case Match_InvalidGPR64shifted64:
7118 case Match_InvalidGPR64shifted128:
7119 case Match_InvalidGPR64NoXZRshifted8:
7120 case Match_InvalidGPR64NoXZRshifted16:
7121 case Match_InvalidGPR64NoXZRshifted32:
7122 case Match_InvalidGPR64NoXZRshifted64:
7123 case Match_InvalidGPR64NoXZRshifted128:
7124 case Match_InvalidZPR32UXTW8:
7125 case Match_InvalidZPR32UXTW16:
7126 case Match_InvalidZPR32UXTW32:
7127 case Match_InvalidZPR32UXTW64:
7128 case Match_InvalidZPR32SXTW8:
7129 case Match_InvalidZPR32SXTW16:
7130 case Match_InvalidZPR32SXTW32:
7131 case Match_InvalidZPR32SXTW64:
7132 case Match_InvalidZPR64UXTW8:
7133 case Match_InvalidZPR64SXTW8:
7134 case Match_InvalidZPR64UXTW16:
7135 case Match_InvalidZPR64SXTW16:
7136 case Match_InvalidZPR64UXTW32:
7137 case Match_InvalidZPR64SXTW32:
7138 case Match_InvalidZPR64UXTW64:
7139 case Match_InvalidZPR64SXTW64:
7140 case Match_InvalidZPR32LSL8:
7141 case Match_InvalidZPR32LSL16:
7142 case Match_InvalidZPR32LSL32:
7143 case Match_InvalidZPR32LSL64:
7144 case Match_InvalidZPR64LSL8:
7145 case Match_InvalidZPR64LSL16:
7146 case Match_InvalidZPR64LSL32:
7147 case Match_InvalidZPR64LSL64:
7148 case Match_InvalidZPR0:
7149 case Match_InvalidZPR8:
7150 case Match_InvalidZPR16:
7151 case Match_InvalidZPR32:
7152 case Match_InvalidZPR64:
7153 case Match_InvalidZPR128:
7154 case Match_InvalidZPR_3b8:
7155 case Match_InvalidZPR_3b16:
7156 case Match_InvalidZPR_3b32:
7157 case Match_InvalidZPR_4b8:
7158 case Match_InvalidZPR_4b16:
7159 case Match_InvalidZPR_4b32:
7160 case Match_InvalidZPR_4b64:
7161 case Match_InvalidSVEPPRorPNRAnyReg:
7162 case Match_InvalidSVEPPRorPNRBReg:
7163 case Match_InvalidSVEPredicateAnyReg:
7164 case Match_InvalidSVEPattern:
7165 case Match_InvalidSVEVecLenSpecifier:
7166 case Match_InvalidSVEPredicateBReg:
7167 case Match_InvalidSVEPredicateHReg:
7168 case Match_InvalidSVEPredicateSReg:
7169 case Match_InvalidSVEPredicateDReg:
7170 case Match_InvalidSVEPredicate3bAnyReg:
7171 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7172 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7173 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7174 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7175 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7176 case Match_InvalidSVEPNPredicateBReg:
7177 case Match_InvalidSVEPNPredicateHReg:
7178 case Match_InvalidSVEPNPredicateSReg:
7179 case Match_InvalidSVEPNPredicateDReg:
7180 case Match_InvalidSVEPredicateListMul2x8:
7181 case Match_InvalidSVEPredicateListMul2x16:
7182 case Match_InvalidSVEPredicateListMul2x32:
7183 case Match_InvalidSVEPredicateListMul2x64:
7184 case Match_InvalidSVEExactFPImmOperandHalfOne:
7185 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7186 case Match_InvalidSVEExactFPImmOperandZeroOne:
7187 case Match_InvalidMatrixTile16:
7188 case Match_InvalidMatrixTile32:
7189 case Match_InvalidMatrixTile64:
7190 case Match_InvalidMatrix:
7191 case Match_InvalidMatrix8:
7192 case Match_InvalidMatrix16:
7193 case Match_InvalidMatrix32:
7194 case Match_InvalidMatrix64:
7195 case Match_InvalidMatrixTileVectorH8:
7196 case Match_InvalidMatrixTileVectorH16:
7197 case Match_InvalidMatrixTileVectorH32:
7198 case Match_InvalidMatrixTileVectorH64:
7199 case Match_InvalidMatrixTileVectorH128:
7200 case Match_InvalidMatrixTileVectorV8:
7201 case Match_InvalidMatrixTileVectorV16:
7202 case Match_InvalidMatrixTileVectorV32:
7203 case Match_InvalidMatrixTileVectorV64:
7204 case Match_InvalidMatrixTileVectorV128:
7205 case Match_InvalidSVCR:
7206 case Match_InvalidMatrixIndexGPR32_12_15:
7207 case Match_InvalidMatrixIndexGPR32_8_11:
7208 case Match_InvalidLookupTable:
7209 case Match_InvalidZPRMul2_Lo8:
7210 case Match_InvalidZPRMul2_Hi8:
7211 case Match_InvalidZPRMul2_Lo16:
7212 case Match_InvalidZPRMul2_Hi16:
7213 case Match_InvalidZPRMul2_Lo32:
7214 case Match_InvalidZPRMul2_Hi32:
7215 case Match_InvalidZPRMul2_Lo64:
7216 case Match_InvalidZPRMul2_Hi64:
7217 case Match_InvalidZPR_K0:
7218 case Match_InvalidSVEVectorList2x8Mul2:
7219 case Match_InvalidSVEVectorList2x16Mul2:
7220 case Match_InvalidSVEVectorList2x32Mul2:
7221 case Match_InvalidSVEVectorList2x64Mul2:
7222 case Match_InvalidSVEVectorList2x128Mul2:
7223 case Match_InvalidSVEVectorList4x8Mul4:
7224 case Match_InvalidSVEVectorList4x16Mul4:
7225 case Match_InvalidSVEVectorList4x32Mul4:
7226 case Match_InvalidSVEVectorList4x64Mul4:
7227 case Match_InvalidSVEVectorList4x128Mul4:
7228 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7229 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7230 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7231 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7232 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7233 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7234 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7235 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7236 case Match_InvalidSVEVectorListStrided2x8:
7237 case Match_InvalidSVEVectorListStrided2x16:
7238 case Match_InvalidSVEVectorListStrided2x32:
7239 case Match_InvalidSVEVectorListStrided2x64:
7240 case Match_InvalidSVEVectorListStrided4x8:
7241 case Match_InvalidSVEVectorListStrided4x16:
7242 case Match_InvalidSVEVectorListStrided4x32:
7243 case Match_InvalidSVEVectorListStrided4x64:
7246 if (ErrorInfo >= Operands.
size())
7247 return Error(IDLoc,
"too few operands for instruction", SMRange(IDLoc, (*Operands.
back()).getEndLoc()));
7250 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7251 if (ErrorLoc == SMLoc())
7253 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
// NOTE(review): lossy extraction — embedded original line numbers, split
// statements, and a numbering jump after 7371 show the function's tail
// (and some interleaved conditions) were dropped. Code left byte-identical.
//
// ParseDirective: dispatches one assembler directive by name. Visible
// structure: a general-directive chain (.arch/.cpu/.inst/.cfi_*/...), then
// a COFF-only chain of .seh_* unwind directives, then .aeabi_* build
// attributes. Each branch forwards to the matching parseDirective* helper.
7261bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7268 SMLoc Loc = DirectiveID.
getLoc();
// --- General directives (IDVal is the directive spelling; its definition
// was dropped by the extraction — presumably DirectiveID's identifier).
7269 if (IDVal ==
".arch")
7270 parseDirectiveArch(Loc);
7271 else if (IDVal ==
".cpu")
7272 parseDirectiveCPU(Loc);
7273 else if (IDVal ==
".tlsdesccall")
7274 parseDirectiveTLSDescCall(Loc);
7275 else if (IDVal ==
".ltorg" || IDVal ==
".pool")
7276 parseDirectiveLtorg(Loc);
7277 else if (IDVal ==
".unreq")
7278 parseDirectiveUnreq(Loc);
7279 else if (IDVal ==
".inst")
7280 parseDirectiveInst(Loc);
7281 else if (IDVal ==
".cfi_negate_ra_state")
7282 parseDirectiveCFINegateRAState();
7283 else if (IDVal ==
".cfi_negate_ra_state_with_pc")
7284 parseDirectiveCFINegateRAStateWithPC();
7285 else if (IDVal ==
".cfi_b_key_frame")
7286 parseDirectiveCFIBKeyFrame();
7287 else if (IDVal ==
".cfi_mte_tagged_frame")
7288 parseDirectiveCFIMTETaggedFrame();
7289 else if (IDVal ==
".arch_extension")
7290 parseDirectiveArchExtension(Loc);
7291 else if (IDVal ==
".variant_pcs")
7292 parseDirectiveVariantPCS(Loc);
// Fallback in this group: Mach-O linker-optimization-hint directives.
7295 parseDirectiveLOH(IDVal, Loc);
7298 }
// --- COFF (Windows) structured-exception-handling unwind directives.
else if (IsCOFF) {
7299 if (IDVal ==
".seh_stackalloc")
7300 parseDirectiveSEHAllocStack(Loc);
7301 else if (IDVal ==
".seh_endprologue")
7302 parseDirectiveSEHPrologEnd(Loc);
7303 else if (IDVal ==
".seh_save_r19r20_x")
7304 parseDirectiveSEHSaveR19R20X(Loc);
7305 else if (IDVal ==
".seh_save_fplr")
7306 parseDirectiveSEHSaveFPLR(Loc);
7307 else if (IDVal ==
".seh_save_fplr_x")
7308 parseDirectiveSEHSaveFPLRX(Loc);
7309 else if (IDVal ==
".seh_save_reg")
7310 parseDirectiveSEHSaveReg(Loc);
7311 else if (IDVal ==
".seh_save_reg_x")
7312 parseDirectiveSEHSaveRegX(Loc);
7313 else if (IDVal ==
".seh_save_regp")
7314 parseDirectiveSEHSaveRegP(Loc);
7315 else if (IDVal ==
".seh_save_regp_x")
7316 parseDirectiveSEHSaveRegPX(Loc);
7317 else if (IDVal ==
".seh_save_lrpair")
7318 parseDirectiveSEHSaveLRPair(Loc);
7319 else if (IDVal ==
".seh_save_freg")
7320 parseDirectiveSEHSaveFReg(Loc);
7321 else if (IDVal ==
".seh_save_freg_x")
7322 parseDirectiveSEHSaveFRegX(Loc);
7323 else if (IDVal ==
".seh_save_fregp")
7324 parseDirectiveSEHSaveFRegP(Loc);
7325 else if (IDVal ==
".seh_save_fregp_x")
7326 parseDirectiveSEHSaveFRegPX(Loc);
7327 else if (IDVal ==
".seh_set_fp")
7328 parseDirectiveSEHSetFP(Loc);
7329 else if (IDVal ==
".seh_add_fp")
7330 parseDirectiveSEHAddFP(Loc);
7331 else if (IDVal ==
".seh_nop")
7332 parseDirectiveSEHNop(Loc);
7333 else if (IDVal ==
".seh_save_next")
7334 parseDirectiveSEHSaveNext(Loc);
7335 else if (IDVal ==
".seh_startepilogue")
7336 parseDirectiveSEHEpilogStart(Loc);
7337 else if (IDVal ==
".seh_endepilogue")
7338 parseDirectiveSEHEpilogEnd(Loc);
7339 else if (IDVal ==
".seh_trap_frame")
7340 parseDirectiveSEHTrapFrame(Loc);
7341 else if (IDVal ==
".seh_pushframe")
7342 parseDirectiveSEHMachineFrame(Loc);
7343 else if (IDVal ==
".seh_context")
7344 parseDirectiveSEHContext(Loc);
7345 else if (IDVal ==
".seh_ec_context")
7346 parseDirectiveSEHECContext(Loc);
7347 else if (IDVal ==
".seh_clear_unwound_to_call")
7348 parseDirectiveSEHClearUnwoundToCall(Loc);
7349 else if (IDVal ==
".seh_pac_sign_lr")
7350 parseDirectiveSEHPACSignLR(Loc);
// The two bool arguments select the paired (_p) and pre-indexed/
// writeback (_x) variants of .seh_save_any_reg.
7351 else if (IDVal ==
".seh_save_any_reg")
7352 parseDirectiveSEHSaveAnyReg(Loc,
false,
false);
7353 else if (IDVal ==
".seh_save_any_reg_p")
7354 parseDirectiveSEHSaveAnyReg(Loc,
true,
false);
7355 else if (IDVal ==
".seh_save_any_reg_x")
7356 parseDirectiveSEHSaveAnyReg(Loc,
false,
true);
7357 else if (IDVal ==
".seh_save_any_reg_px")
7358 parseDirectiveSEHSaveAnyReg(Loc,
true,
true);
7359 else if (IDVal ==
".seh_allocz")
7360 parseDirectiveSEHAllocZ(Loc);
7361 else if (IDVal ==
".seh_save_zreg")
7362 parseDirectiveSEHSaveZReg(Loc);
7363 else if (IDVal ==
".seh_save_preg")
7364 parseDirectiveSEHSavePReg(Loc);
// --- AArch64 build-attribute directives (ELF). The guard condition for
// this group was dropped by the extraction (7364 -> 7368 jump).
7368 if (IDVal ==
".aeabi_subsection")
7369 parseDirectiveAeabiSubSectionHeader(Loc);
7370 else if (IDVal ==
".aeabi_attribute")
7371 parseDirectiveAeabiAArch64Attr(Loc);
// NOTE(review): orphaned fragment — the enclosing function's header was
// lost in extraction (numbering jumps 7371 -> 7384). From the visible
// logic it appears to expand a "crypto"/"nocrypto" extension request into
// the per-architecture component extensions (aes/sha2/sha3/sm4 families);
// presumably the helper that normalizes RequestedExtensions for
// .arch/.cpu — TODO confirm against the canonical source.
//
// Branch 1: crypto explicitly enabled (and not also disabled): older
// v8.1-v8.3 architectures get one component set, newer v8.4+/v9 (and
// v8-R) another; the push_back calls for this branch were dropped by the
// extraction.
7384 if (!NoCrypto && Crypto) {
7387 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7388 ArchInfo == AArch64::ARMV8_3A) {
7392 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7393 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7394 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7395 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7396 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7397 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7403 }
// Branch 2: "nocrypto" requested — push the matching "no<feature>"
// component extensions for the selected architecture.
else if (NoCrypto) {
7406 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7407 ArchInfo == AArch64::ARMV8_3A) {
7408 RequestedExtensions.
push_back(
"nosha2");
7411 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7412 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7413 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7414 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7415 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7416 ArchInfo == AArch64::ARMV9_4A) {
7418 RequestedExtensions.
push_back(
"nosha3");
7419 RequestedExtensions.
push_back(
"nosha2");
// NOTE(review): lossy extraction — interior lines dropped (e.g. 7436->7440,
// 7457->7462, 7473->7481 jumps). Code left byte-identical; comments only.
//
// parseDirectiveArch: handles ".arch <name>[+ext[+ext...]]". Splits the
// statement into the base architecture and a '+'-separated extension
// string, switches the subtarget to the new architecture's features, then
// applies each requested extension (a leading "no", case-insensitively,
// disables it). Finally recomputes available features and notifies the
// target streamer.
7431bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7432 SMLoc CurLoc = getLoc();
7434 StringRef
Name = getParser().parseStringToEndOfStatement().trim();
7435 StringRef Arch, ExtensionString;
// Split "armv9.2-a+sve2+..." into base arch and extension suffix.
7436 std::tie(Arch, ExtensionString) =
Name.split(
'+');
7440 return Error(CurLoc,
"unknown arch name");
7446 std::vector<StringRef> AArch64Features;
// Mutating a copy of the subtarget info; the original STI is preserved.
7450 MCSubtargetInfo &STI = copySTI();
7451 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7453 join(ArchFeatures.begin(), ArchFeatures.end(),
","));
7456 if (!ExtensionString.
empty())
7457 ExtensionString.
split(RequestedExtensions,
'+');
7462 for (
auto Name : RequestedExtensions) {
// "noXYZ" (case-insensitive) means disable extension XYZ.
7466 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
7473 return Error(CurLoc,
"unsupported architectural extension: " + Name);
7481 FeatureBitset Features = ComputeAvailableFeatures(STI.
getFeatureBits());
7482 setAvailableFeatures(Features);
7484 getTargetStreamer().emitDirectiveArch(Name);
7490bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7491 SMLoc ExtLoc = getLoc();
7493 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7498 bool EnableFeature =
true;
7499 StringRef
Name = FullName;
7500 if (
Name.starts_with_insensitive(
"no")) {
7501 EnableFeature =
false;
7510 return Error(ExtLoc,
"unsupported architectural extension: " + Name);
7512 MCSubtargetInfo &STI = copySTI();
7517 FeatureBitset Features = ComputeAvailableFeatures(STI.
getFeatureBits());
7518 setAvailableFeatures(Features);
7520 getTargetStreamer().emitDirectiveArchExtension(FullName);
7526bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7527 SMLoc CurLoc = getLoc();
7529 StringRef CPU, ExtensionString;
7530 std::tie(CPU, ExtensionString) =
7531 getParser().parseStringToEndOfStatement().
trim().
split(
'+');
7537 if (!ExtensionString.
empty())
7538 ExtensionString.
split(RequestedExtensions,
'+');
7542 Error(CurLoc,
"unknown CPU name");
7547 MCSubtargetInfo &STI = copySTI();
7551 for (
auto Name : RequestedExtensions) {
7555 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
7562 return Error(CurLoc,
"unsupported architectural extension: " + Name);
7570 FeatureBitset Features = ComputeAvailableFeatures(STI.
getFeatureBits());
7571 setAvailableFeatures(Features);
7577bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7579 return Error(Loc,
"expected expression following '.inst' directive");
7581 auto parseOp = [&]() ->
bool {
7583 const MCExpr *Expr =
nullptr;
7584 if (check(getParser().parseExpression(Expr), L,
"expected expression"))
7587 if (check(!
Value, L,
"expected constant expression"))
7589 getTargetStreamer().emitInst(
Value->getValue());
7593 return parseMany(parseOp);
7598bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7600 if (check(getParser().parseIdentifier(Name), L,
"expected symbol") ||
7612 getParser().getStreamer().emitInstruction(Inst, getSTI());
7618bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7622 return TokError(
"expected an identifier or a number in directive");
7625 int64_t
Id = getTok().getIntVal();
7627 return TokError(
"invalid numeric identifier in directive");
7630 StringRef
Name = getTok().getIdentifier();
7636 return TokError(
"invalid identifier in directive");
7644 assert(NbArgs != -1 &&
"Invalid number of arguments");
7647 for (
int Idx = 0; Idx < NbArgs; ++Idx) {
7649 if (getParser().parseIdentifier(Name))
7650 return TokError(
"expected identifier in directive");
7653 if (Idx + 1 == NbArgs)
7661 getStreamer().emitLOHDirective(Kind, Args);
7667bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7670 getTargetStreamer().emitCurrentConstantPool();
7676bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7678 SMLoc SRegLoc = getLoc();
7679 RegKind RegisterKind = RegKind::Scalar;
7681 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7685 RegisterKind = RegKind::NeonVector;
7686 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7692 return Error(SRegLoc,
"vector register without type specifier expected");
7697 RegisterKind = RegKind::SVEDataVector;
7699 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7705 return Error(SRegLoc,
7706 "sve vector register without type specifier expected");
7711 RegisterKind = RegKind::SVEPredicateVector;
7712 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7718 return Error(SRegLoc,
7719 "sve predicate register without type specifier expected");
7723 return Error(SRegLoc,
"register name or alias expected");
7729 auto pair = std::make_pair(RegisterKind, RegNum);
7730 if (RegisterReqs.
insert(std::make_pair(Name, pair)).first->second != pair)
7731 Warning(L,
"ignoring redefinition of register alias '" + Name +
"'");
7738bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7740 return TokError(
"unexpected input in .unreq directive.");
7741 RegisterReqs.
erase(getTok().getIdentifier().lower());
7746bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7749 getStreamer().emitCFINegateRAState();
7753bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7756 getStreamer().emitCFINegateRAStateWithPC();
7762bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7765 getStreamer().emitCFIBKeyFrame();
7771bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7774 getStreamer().emitCFIMTETaggedFrame();
7780bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7782 if (getParser().parseIdentifier(Name))
7783 return TokError(
"expected symbol name");
7786 getTargetStreamer().emitDirectiveVariantPCS(
7793bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7795 if (parseImmExpr(
Size))
7797 getTargetStreamer().emitARM64WinCFIAllocStack(
Size);
7803bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7804 getTargetStreamer().emitARM64WinCFIPrologEnd();
7810bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7812 if (parseImmExpr(
Offset))
7814 getTargetStreamer().emitARM64WinCFISaveR19R20X(
Offset);
7820bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7822 if (parseImmExpr(
Offset))
7824 getTargetStreamer().emitARM64WinCFISaveFPLR(
Offset);
7830bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7832 if (parseImmExpr(
Offset))
7834 getTargetStreamer().emitARM64WinCFISaveFPLRX(
Offset);
7840bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7843 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7844 parseComma() || parseImmExpr(
Offset))
7846 getTargetStreamer().emitARM64WinCFISaveReg(
Reg,
Offset);
7852bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7855 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7856 parseComma() || parseImmExpr(
Offset))
7858 getTargetStreamer().emitARM64WinCFISaveRegX(
Reg,
Offset);
7864bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7867 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7868 parseComma() || parseImmExpr(
Offset))
7870 getTargetStreamer().emitARM64WinCFISaveRegP(
Reg,
Offset);
7876bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7879 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7880 parseComma() || parseImmExpr(
Offset))
7882 getTargetStreamer().emitARM64WinCFISaveRegPX(
Reg,
Offset);
7888bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7892 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7893 parseComma() || parseImmExpr(
Offset))
7895 if (check(((
Reg - 19) % 2 != 0), L,
7896 "expected register with even offset from x19"))
7898 getTargetStreamer().emitARM64WinCFISaveLRPair(
Reg,
Offset);
7904bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7907 if (parseRegisterInRange(
Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7908 parseComma() || parseImmExpr(
Offset))
7910 getTargetStreamer().emitARM64WinCFISaveFReg(
Reg,
Offset);
7916bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7919 if (parseRegisterInRange(
Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7920 parseComma() || parseImmExpr(
Offset))
7922 getTargetStreamer().emitARM64WinCFISaveFRegX(
Reg,
Offset);
7928bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7931 if (parseRegisterInRange(
Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7932 parseComma() || parseImmExpr(
Offset))
7934 getTargetStreamer().emitARM64WinCFISaveFRegP(
Reg,
Offset);
7940bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7943 if (parseRegisterInRange(
Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7944 parseComma() || parseImmExpr(
Offset))
7946 getTargetStreamer().emitARM64WinCFISaveFRegPX(
Reg,
Offset);
7952bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7953 getTargetStreamer().emitARM64WinCFISetFP();
7959bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7961 if (parseImmExpr(
Size))
7963 getTargetStreamer().emitARM64WinCFIAddFP(
Size);
7969bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7970 getTargetStreamer().emitARM64WinCFINop();
7976bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7977 getTargetStreamer().emitARM64WinCFISaveNext();
7983bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7984 getTargetStreamer().emitARM64WinCFIEpilogStart();
7990bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7991 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7997bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7998 getTargetStreamer().emitARM64WinCFITrapFrame();
8004bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
8005 getTargetStreamer().emitARM64WinCFIMachineFrame();
8011bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
8012 getTargetStreamer().emitARM64WinCFIContext();
8018bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
8019 getTargetStreamer().emitARM64WinCFIECContext();
8025bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
8026 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
8032bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
8033 getTargetStreamer().emitARM64WinCFIPACSignLR();
8042bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L,
bool Paired,
8047 if (check(parseRegister(
Reg, Start, End), getLoc(),
"expected register") ||
8048 parseComma() || parseImmExpr(
Offset))
8051 if (
Reg == AArch64::FP ||
Reg == AArch64::LR ||
8052 (
Reg >= AArch64::X0 &&
Reg <= AArch64::X28)) {
8053 if (
Offset < 0 ||
Offset % (Paired || Writeback ? 16 : 8))
8054 return Error(L,
"invalid save_any_reg offset");
8055 unsigned EncodedReg;
8056 if (
Reg == AArch64::FP)
8058 else if (
Reg == AArch64::LR)
8061 EncodedReg =
Reg - AArch64::X0;
8063 if (
Reg == AArch64::LR)
8064 return Error(Start,
"lr cannot be paired with another register");
8066 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg,
Offset);
8068 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg,
Offset);
8071 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg,
Offset);
8073 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg,
Offset);
8075 }
else if (
Reg >= AArch64::D0 &&
Reg <= AArch64::D31) {
8076 unsigned EncodedReg =
Reg - AArch64::D0;
8077 if (
Offset < 0 ||
Offset % (Paired || Writeback ? 16 : 8))
8078 return Error(L,
"invalid save_any_reg offset");
8080 if (
Reg == AArch64::D31)
8081 return Error(Start,
"d31 cannot be paired with another register");
8083 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg,
Offset);
8085 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg,
Offset);
8088 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg,
Offset);
8090 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg,
Offset);
8092 }
else if (
Reg >= AArch64::Q0 &&
Reg <= AArch64::Q31) {
8093 unsigned EncodedReg =
Reg - AArch64::Q0;
8095 return Error(L,
"invalid save_any_reg offset");
8097 if (
Reg == AArch64::Q31)
8098 return Error(Start,
"q31 cannot be paired with another register");
8100 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg,
Offset);
8102 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg,
Offset);
8105 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg,
Offset);
8107 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg,
Offset);
8110 return Error(Start,
"save_any_reg register must be x, q or d register");
8117bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8119 if (parseImmExpr(
Offset))
8121 getTargetStreamer().emitARM64WinCFIAllocZ(
Offset);
8127bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8132 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8135 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
8136 "expected register in range z8 to z23"))
8138 if (parseComma() || parseImmExpr(
Offset))
8140 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0,
Offset);
8146bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8151 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
8154 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
8155 "expected register in range p4 to p15"))
8157 if (parseComma() || parseImmExpr(
Offset))
8159 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0,
Offset);
8163bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8169 MCAsmParser &Parser = getParser();
8172 StringRef SubsectionName;
8183 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8184 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
8189 if (SubsectionExists) {
8190 getTargetStreamer().emitAttributesSubsection(
8193 SubsectionExists->IsOptional),
8195 SubsectionExists->ParameterType));
8201 "Could not switch to subsection '" + SubsectionName +
8202 "' using subsection name, subsection has not been defined");
8225 if (SubsectionExists) {
8226 if (IsOptional != SubsectionExists->IsOptional) {
8228 "optionality mismatch! subsection '" + SubsectionName +
8229 "' already exists with optionality defined as '" +
8231 SubsectionExists->IsOptional) +
8239 "optionality parameter not found, expected required|optional");
8246 "aeabi_feature_and_bits must be marked as optional");
8253 "aeabi_pauthabi must be marked as required");
8273 if (SubsectionExists) {
8274 if (
Type != SubsectionExists->ParameterType) {
8276 "type mismatch! subsection '" + SubsectionName +
8277 "' already exists with type defined as '" +
8279 SubsectionExists->ParameterType) +
8287 "type parameter not found, expected uleb128|ntbs");
8295 SubsectionName +
" must be marked as ULEB128");
8304 "attributes subsection header directive");
8308 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional,
Type);
8313bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8317 MCAsmParser &Parser = getParser();
8319 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8320 getTargetStreamer().getActiveAttributesSubsection();
8321 if (
nullptr == ActiveSubsection) {
8323 "no active subsection, build attribute can not be added");
8326 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8327 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8335 ActiveSubsectionName)
8338 StringRef TagStr =
"";
8341 Tag = getTok().getIntVal();
8344 switch (ActiveSubsectionID) {
8349 "' \nExcept for public subsections, "
8350 "tags have to be an unsigned int.");
8357 TagStr +
"' for subsection '" +
8358 ActiveSubsectionName +
"'");
8366 TagStr +
"' for subsection '" +
8367 ActiveSubsectionName +
"'");
8385 unsigned ValueInt = unsigned(-1);
8386 std::string ValueStr =
"";
8391 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8394 ValueInt = getTok().getIntVal();
8399 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8407 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8418 if (0 != ValueInt && 1 != ValueInt) {
8420 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8421 "' options are 0|1");
8430 "unexpected token for AArch64 build attributes tag and value "
8431 "attribute directive");
8435 if (
unsigned(-1) != ValueInt) {
8436 getTargetStreamer().emitAttribute(ActiveSubsectionName,
Tag, ValueInt,
"");
8438 if (
"" != ValueStr) {
8439 getTargetStreamer().emitAttribute(ActiveSubsectionName,
Tag,
unsigned(-1),
8445bool AArch64AsmParser::parseExprWithSpecifier(
const MCExpr *&Res, SMLoc &
E) {
8446 SMLoc Loc = getLoc();
8448 return TokError(
"expected '%' relocation specifier");
8449 StringRef
Identifier = getParser().getTok().getIdentifier();
8452 return TokError(
"invalid relocation specifier");
8458 const MCExpr *SubExpr;
8459 if (getParser().parseParenExpression(SubExpr,
E))
8466bool AArch64AsmParser::parseDataExpr(
const MCExpr *&Res) {
8469 return parseExprWithSpecifier(Res, EndLoc);
8471 if (getParser().parseExpression(Res))
8473 MCAsmParser &Parser = getParser();
8477 return Error(getLoc(),
"expected relocation specifier");
8480 SMLoc Loc = getLoc();
8482 if (Identifier ==
"auth")
8483 return parseAuthExpr(Res, EndLoc);
8487 if (Identifier ==
"got")
8491 return Error(Loc,
"invalid relocation specifier");
8496 return Error(Loc,
"@ specifier only allowed after a symbol");
8499 std::optional<MCBinaryExpr::Opcode> Opcode;
8507 if (getParser().parsePrimaryExpr(Term, EndLoc,
nullptr))
8518bool AArch64AsmParser::parseAuthExpr(
const MCExpr *&Res, SMLoc &EndLoc) {
8519 MCAsmParser &Parser = getParser();
8521 AsmToken Tok = Parser.
getTok();
8528 return TokError(
"expected key name");
8533 return TokError(
"invalid key '" + KeyStr +
"'");
8540 return TokError(
"expected integer discriminator");
8544 return TokError(
"integer discriminator " + Twine(Discriminator) +
8545 " out of range [0, 0xFFFF]");
8548 bool UseAddressDiversity =
false;
8553 return TokError(
"expected 'addr'");
8554 UseAddressDiversity =
true;
8563 UseAddressDiversity, Ctx, Res->
getLoc());
8567bool AArch64AsmParser::classifySymbolRef(
const MCExpr *Expr,
8576 ELFSpec = AE->getSpecifier();
8577 Expr = AE->getSubExpr();
8617#define GET_REGISTER_MATCHER
8618#define GET_SUBTARGET_FEATURE_NAME
8619#define GET_MATCHER_IMPLEMENTATION
8620#define GET_MNEMONIC_SPELL_CHECKER
8621#include "AArch64GenAsmMatcher.inc"
8627 AArch64Operand &
Op =
static_cast<AArch64Operand &
>(AsmOp);
8629 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8631 return Match_InvalidOperand;
8634 return Match_InvalidOperand;
8635 if (CE->getValue() == ExpectedVal)
8636 return Match_Success;
8637 return Match_InvalidOperand;
8642 return Match_InvalidOperand;
8648 if (
Op.isTokenEqual(
"za"))
8649 return Match_Success;
8650 return Match_InvalidOperand;
8656#define MATCH_HASH(N) \
8657 case MCK__HASH_##N: \
8658 return MatchesOpImmediate(N);
8684#define MATCH_HASH_MINUS(N) \
8685 case MCK__HASH__MINUS_##N: \
8686 return MatchesOpImmediate(-N);
8690#undef MATCH_HASH_MINUS
8694ParseStatus AArch64AsmParser::tryParseGPRSeqPair(
OperandVector &Operands) {
8699 return Error(S,
"expected register");
8701 MCRegister FirstReg;
8702 ParseStatus Res = tryParseScalarRegister(FirstReg);
8704 return Error(S,
"expected first even register of a consecutive same-size "
8705 "even/odd register pair");
8707 const MCRegisterClass &WRegClass =
8708 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8709 const MCRegisterClass &XRegClass =
8710 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8712 bool isXReg = XRegClass.
contains(FirstReg),
8713 isWReg = WRegClass.
contains(FirstReg);
8714 if (!isXReg && !isWReg)
8715 return Error(S,
"expected first even register of a consecutive same-size "
8716 "even/odd register pair");
8718 const MCRegisterInfo *RI =
getContext().getRegisterInfo();
8721 if (FirstEncoding & 0x1)
8722 return Error(S,
"expected first even register of a consecutive same-size "
8723 "even/odd register pair");
8726 return Error(getLoc(),
"expected comma");
8731 MCRegister SecondReg;
8732 Res = tryParseScalarRegister(SecondReg);
8734 return Error(
E,
"expected second odd register of a consecutive same-size "
8735 "even/odd register pair");
8738 (isXReg && !XRegClass.
contains(SecondReg)) ||
8739 (isWReg && !WRegClass.
contains(SecondReg)))
8740 return Error(
E,
"expected second odd register of a consecutive same-size "
8741 "even/odd register pair");
8746 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8749 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8752 Operands.
push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8758template <
bool ParseShiftExtend,
bool ParseSuffix>
8759ParseStatus AArch64AsmParser::tryParseSVEDataVector(
OperandVector &Operands) {
8760 const SMLoc S = getLoc();
8766 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8771 if (ParseSuffix &&
Kind.empty())
8778 unsigned ElementWidth = KindRes->second;
8782 Operands.
push_back(AArch64Operand::CreateVectorReg(
8783 RegNum, RegKind::SVEDataVector, ElementWidth, S, S,
getContext()));
8785 ParseStatus Res = tryParseVectorIndex(Operands);
8796 Res = tryParseOptionalShiftExtend(ExtOpnd);
8800 auto Ext =
static_cast<AArch64Operand *
>(ExtOpnd.
back().
get());
8801 Operands.
push_back(AArch64Operand::CreateVectorReg(
8802 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8803 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8804 Ext->hasShiftExtendAmount()));
8809ParseStatus AArch64AsmParser::tryParseSVEPattern(
OperandVector &Operands) {
8810 MCAsmParser &Parser = getParser();
8812 SMLoc
SS = getLoc();
8813 const AsmToken &TokE = getTok();
8824 const MCExpr *ImmVal;
8831 return TokError(
"invalid operand for instruction");
8836 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.
getString());
8841 Pattern = Pat->Encoding;
8842 assert(Pattern >= 0 && Pattern < 32);
8853AArch64AsmParser::tryParseSVEVecLenSpecifier(
OperandVector &Operands) {
8855 SMLoc
SS = getLoc();
8856 const AsmToken &TokE = getTok();
8858 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8864 Pattern = Pat->Encoding;
8865 assert(Pattern >= 0 && Pattern <= 1 &&
"Pattern does not exist");
8874ParseStatus AArch64AsmParser::tryParseGPR64x8(
OperandVector &Operands) {
8875 SMLoc
SS = getLoc();
8878 if (!tryParseScalarRegister(XReg).isSuccess())
8884 XReg, AArch64::x8sub_0,
8885 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8888 "expected an even-numbered x-register in the range [x0,x22]");
8891 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8895ParseStatus AArch64AsmParser::tryParseImmRange(
OperandVector &Operands) {
8905 if (getParser().parseExpression(ImmF))
8915 SMLoc
E = getTok().getLoc();
8917 if (getParser().parseExpression(ImmL))
8924 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S,
E,
getContext()));
8929ParseStatus AArch64AsmParser::tryParseAdjImm0_63(
OperandVector &Operands) {
8939 if (getParser().parseExpression(Ex))
8949 static_assert(Adj == 1 || Adj == -1,
"Unsafe immediate adjustment");
8956 Operands.
push_back(AArch64Operand::CreateImm(
static bool isGPR64(unsigned Reg, unsigned SubReg, const MachineRegisterInfo *MRI)
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (elements, element-width) if Suffix is a valid vector kind.
static unsigned matchMatrixRegName(StringRef Name)
static bool isMovPrfxable(unsigned TSFlags)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
static AArch64CC::CondCode parseCondCode(ArrayRef< MachineOperand > Cond)
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a variety of arithmetic operations on them.
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_EXTERNAL_VISIBILITY
Value * getPointer(Value *Ptr)
loop data Loop Data Prefetch
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
APInt bitcastToAPInt() const
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
int64_t getSExtValue() const
Get sign extended value.
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
void UnLex(AsmToken const &Token)
LLVM_ABI SMLoc getLoc() const
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on strings) in the token.
bool is(TokenKind K) const
LLVM_ABI SMLoc getEndLoc() const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Base class for user error types.
Container class for subtarget features.
constexpr size_t size() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
static LLVM_ABI const MCBinaryExpr * create(Opcode Op, const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
const MCRegisterInfo * getRegisterInfo() const
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
unsigned getNumOperands() const
unsigned getOpcode() const
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
static MCOperand createExpr(const MCExpr *Val)
static MCOperand createReg(MCRegister Reg)
static MCOperand createImm(int64_t Val)
MCRegister getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
constexpr unsigned id() const
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with any appended feature string.
const FeatureBitset & ClearFeatureBitsTransitively(const FeatureBitset &FB)
const FeatureBitset & SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
VariantKind getKind() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
const MCSymbol * getAddSym() const
int64_t getConstant() const
uint32_t getSpecifier() const
const MCSymbol * getSubSym() const
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
constexpr unsigned id() const
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
void insert_range(Range &&R)
bool contains(const T &V) const
Check if the SmallSet contains the given element.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
iterator find(StringRef Key)
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
static constexpr size_t npos
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
LLVM_ABI std::string lower() const
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SubsectionType getTypeID(StringRef Type)
StringRef getVendorName(unsigned const Vendor)
StringRef getOptionalStr(unsigned Optional)
@ FEATURE_AND_BITS_TAG_NOT_FOUND
VendorID
AArch64 build attributes vendors IDs (a.k.a subsection name)
StringRef getSubsectionTypeUnknownError()
SubsectionOptional getOptionalID(StringRef Optional)
StringRef getSubsectionOptionalUnknownError()
FeatureAndBitsTags getFeatureAndBitsTagsID(StringRef FeatureAndBitsTag)
VendorID getVendorID(StringRef const Vendor)
PauthABITags getPauthABITagsID(StringRef PauthABITag)
StringRef getTypeStr(unsigned Type)
static CondCode getInvertedCondCode(CondCode Code)
const PHint * lookupPHintByName(StringRef)
uint32_t parseGenericRegister(StringRef Name)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Specifier parsePercentSpecifierName(StringRef)
LLVM_ABI const ArchInfo * parseArch(StringRef Arch)
LLVM_ABI const ArchInfo * getArchForCpu(StringRef CPU)
@ DestructiveInstTypeMask
LLVM_ABI bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
NodeAddr< CodeNode * > Code
Context & getContext() const
This is an optimization pass for GlobalISel generic memory operations.
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
FunctionAddr VTableAddr Value
static int MCLOHNameToId(StringRef Name)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
std::string utostr(uint64_t X, bool isNeg=false)
static bool isValidMCLOHType(unsigned Kind)
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FunctionAddr VTableAddr Count
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
static int MCLOHIdToNbArgs(MCLOHType Kind)
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
static MCRegister getXRegFromWReg(MCRegister Reg)
MCLOHType
Linker Optimization Hint Type.
FunctionAddr VTableAddr Next
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
const FeatureBitset Features
AArch64::ExtensionBitset DefaultExts
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
bool haveFeatures(FeatureBitset ActiveFeatures) const