//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector,
  Matrix
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
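
  // Illustrative sketch (not part of the original source): NextPrefix lets
  // validateInstruction() check a MOVPRFX pair such as
  //   movprfx z0, z1
  //   fmla    z0.s, p0/m, z2.s, z3.s
  // where the destructive instruction following the prefix must write the
  // prefixed destination recorded by NextPrefix.getDstReg().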

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }
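
  // Illustrative example (not in the original source): with the aliases
  // registered above, a directive such as
  //   .xword 0x1122334455667788
  // is handled exactly like
  //   .8byte 0x1122334455667788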

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
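
  // Illustrative example (not in the original source): in
  //   ldr x0, [x1, w2, sxtw #3]
  // the index "w2, sxtw #3" is parsed as a single register operand whose
  // RegOp::ShiftExtend records {SXTW, 3, HasExplicitAmount = true}.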

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }

  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
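
  // Worked example (illustrative, not in the original source):
  // isSImmScaled<4, 16>() accepts multiples of 16 in [-128, 112], since
  // Bits = 4 gives a signed range of [-8, 7] before scaling by 16.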

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
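
  // Worked example (illustrative, not in the original source): for a 64-bit
  // load such as "ldr x0, [x1, #32760]", isUImm12Offset<8>() accepts offsets
  // 0, 8, ..., 32760, i.e. non-negative multiples of 8 with 32760 / 8 < 0x1000.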

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
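
  // Worked example (illustrative, not in the original source): for
  // T = int32_t, Upper is 0xFFFFFFFF00000000. A sign-extended value such as
  // 0xFFFFFFFF000000FF has all-ones top bits, so it passes the check and
  // 0x000000FF is then tested as a 32-bit logical immediate.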

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
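
  // Worked example (illustrative, not in the original source):
  // getShiftedVal<12>() returns (3, 12) for the plain immediate 0x3000,
  // and (5, 0) for the immediate 5, which cannot absorb an 'lsl #12'.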

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
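
  // Illustrative example (not in the original source): "add x0, x1, #-16"
  // fails isAddSubImm but matches isAddSubImmNeg, which lets the matcher
  // select the SUB encoding with the negated immediate #16 instead.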

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) &&
            Val <= (((1 << (N - 1)) - 1) << 2));
  }
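
  // Worked example (illustrative, not in the original source): for B/BL,
  // isBranchTarget<26>() accepts word-aligned byte offsets in
  // [-(1 << 27), (1 << 27) - 4], i.e. the architectural +/-128 MiB range.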

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return true;
    }

    return false;
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
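
  // Illustrative example (not in the original source): "mov x0, #0x10000"
  // matches isMOVZMovAlias<64, 16>, since 0x10000 is 0x1 shifted left by 16,
  // and is emitted as "movz x0, #1, lsl #16".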

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
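
  // Illustrative example (not in the original source):
  // isComplexRotation<90, 0> matches rotations 0, 90, 180 and 270 (as used
  // by FCMLA), while isComplexRotation<180, 90> matches 90 and 270 (FCADD).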

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
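
  // Illustrative example (not in the original source): for
  // "ldr w0, [x1, w2, uxtw #2]" the index operand is UXTW with amount 2,
  // so isMemWExtend<32> matches because Log2_32(32 / 8) == 2.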

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
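
  // Illustrative example (not in the original source): "ldr x0, [x1, #-8]"
  // has an offset that is not a legal scaled uimm12 but fits the signed
  // 9-bit unscaled range, so isSImm9OffsetFB<64> matches and the alias is
  // assembled as LDUR.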

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
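
  // Illustrative note (not in the original source): both helpers map a
  // register to the same-numbered register of the other width class via its
  // encoding value, e.g. X3 <-> W3, for instructions whose written operand
  // width differs from the register class that is actually encoded.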

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
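
  // Illustrative example (not in the original source): for the NEON list
  // "{ v2.8b, v3.8b }" the parser records the start register as Q2, so
  // addVectorListOperands<VecListIdx_DReg, 2> emits the tuple register
  // D0_D1 + (Q2 - Q0), i.e. D2_D3.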

  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }
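
  // Worked example (illustrative, not in the original source): a constant
  // ADRP offset of 0x12345 is emitted as the page immediate 0x12
  // (0x12345 >> 12), since ADRP addresses whole 4 KiB pages.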

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template <int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1796 
1797  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1798  // Branch operands don't encode the low bits, so shift them off
1799  // here. If it's a label, however, just put it on directly as there's
1800  // not enough information now to do anything.
1801  assert(N == 1 && "Invalid number of operands!");
1802  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1803  if (!MCE) {
1804  addExpr(Inst, getImm());
1805  return;
1806  }
1807  assert(MCE && "Invalid constant immediate operand!");
1808  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1809  }
1810 
1811  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1812  // Branch operands don't encode the low bits, so shift them off
1813  // here. If it's a label, however, just put it on directly as there's
1814  // not enough information now to do anything.
1815  assert(N == 1 && "Invalid number of operands!");
1816  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1817  if (!MCE) {
1818  addExpr(Inst, getImm());
1819  return;
1820  }
1821  assert(MCE && "Invalid constant immediate operand!");
1822  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1823  }
1824 
1825  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1826  assert(N == 1 && "Invalid number of operands!");
1827  Inst.addOperand(MCOperand::createImm(
1828  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1829  }
1830 
1831  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1832  assert(N == 1 && "Invalid number of operands!");
1833  Inst.addOperand(MCOperand::createImm(getBarrier()));
1834  }
1835 
1836  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1837  assert(N == 1 && "Invalid number of operands!");
1838  Inst.addOperand(MCOperand::createImm(getBarrier()));
1839  }
1840 
1841  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1842  assert(N == 1 && "Invalid number of operands!");
1843 
1844  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1845  }
1846 
1847  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1848  assert(N == 1 && "Invalid number of operands!");
1849 
1850  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1851  }
1852 
1853  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1854  assert(N == 1 && "Invalid number of operands!");
1855 
1856  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1857  }
1858 
1859  void addSVCROperands(MCInst &Inst, unsigned N) const {
1860  assert(N == 1 && "Invalid number of operands!");
1861 
1862  Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
1863  }
1864 
1865  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1866  assert(N == 1 && "Invalid number of operands!");
1867 
1868  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1869  }
1870 
1871  void addSysCROperands(MCInst &Inst, unsigned N) const {
1872  assert(N == 1 && "Invalid number of operands!");
1873  Inst.addOperand(MCOperand::createImm(getSysCR()));
1874  }
1875 
1876  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1877  assert(N == 1 && "Invalid number of operands!");
1878  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1879  }
1880 
1881  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1882  assert(N == 1 && "Invalid number of operands!");
1883  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1884  }
1885 
1886  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1887  assert(N == 1 && "Invalid number of operands!");
1888  Inst.addOperand(MCOperand::createImm(getBTIHint()));
1889  }
1890 
1891  void addShifterOperands(MCInst &Inst, unsigned N) const {
1892  assert(N == 1 && "Invalid number of operands!");
1893  unsigned Imm =
1894  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1895  Inst.addOperand(MCOperand::createImm(Imm));
1896  }
1897 
1898  void addExtendOperands(MCInst &Inst, unsigned N) const {
1899  assert(N == 1 && "Invalid number of operands!");
1900  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1901  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1902  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1903  Inst.addOperand(MCOperand::createImm(Imm));
1904  }
1905 
1906  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1907  assert(N == 1 && "Invalid number of operands!");
1908  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1909  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1910  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1911  Inst.addOperand(MCOperand::createImm(Imm));
1912  }
1913 
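 // Memory-operand extends lower to two immediates: whether the index
 // register is sign-extended (SXTW/SXTX) and whether any shift is applied.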
1914  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1915  assert(N == 2 && "Invalid number of operands!");
1916  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1917  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1918  Inst.addOperand(MCOperand::createImm(IsSigned));
1919  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1920  }
1921 
1922  // For 8-bit load/store instructions with a register offset, both the
1923  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1924  // they're disambiguated by whether the shift was explicit or implicit rather
1925  // than its size.
1926  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1927  assert(N == 2 && "Invalid number of operands!");
1928  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1929  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1930  Inst.addOperand(MCOperand::createImm(IsSigned));
1931  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1932  }
1933 
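 // MOV-alias forms of MOVZ/MOVN: extract the 16-bit halfword at the given
 // shift; the MOVN alias below inverts the value before extracting.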
1934  template<int Shift>
1935  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1936  assert(N == 1 && "Invalid number of operands!");
1937 
1938  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939  if (CE) {
1940  uint64_t Value = CE->getValue();
1941  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1942  } else {
1943  addExpr(Inst, getImm());
1944  }
1945  }
1946 
1947  template<int Shift>
1948  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1949  assert(N == 1 && "Invalid number of operands!");
1950 
1951  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1952  uint64_t Value = CE->getValue();
1953  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1954  }
1955 
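 // Complex rotations are encoded as multiples: even rotations (0, 90, 180,
 // 270) encode as value/90, odd rotations (90, 270) as (value - 90)/180.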
1956  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1957  assert(N == 1 && "Invalid number of operands!");
1958  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1959  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1960  }
1961 
1962  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1963  assert(N == 1 && "Invalid number of operands!");
1964  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1965  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1966  }
1967 
1968  void print(raw_ostream &OS) const override;
1969 
1970  static std::unique_ptr<AArch64Operand>
1971  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1972  auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1973  Op->Tok.Data = Str.data();
1974  Op->Tok.Length = Str.size();
1975  Op->Tok.IsSuffix = IsSuffix;
1976  Op->StartLoc = S;
1977  Op->EndLoc = S;
1978  return Op;
1979  }
1980 
1981  static std::unique_ptr<AArch64Operand>
1982  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1983  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1984  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1985  unsigned ShiftAmount = 0,
1986  unsigned HasExplicitAmount = false) {
1987  auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1988  Op->Reg.RegNum = RegNum;
1989  Op->Reg.Kind = Kind;
1990  Op->Reg.ElementWidth = 0;
1991  Op->Reg.EqualityTy = EqTy;
1992  Op->Reg.ShiftExtend.Type = ExtTy;
1993  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1994  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1995  Op->StartLoc = S;
1996  Op->EndLoc = E;
1997  return Op;
1998  }
1999 
2000  static std::unique_ptr<AArch64Operand>
2001  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2002  SMLoc S, SMLoc E, MCContext &Ctx,
2003  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2004  unsigned ShiftAmount = 0,
2005  unsigned HasExplicitAmount = false) {
2006  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2007  Kind == RegKind::SVEPredicateVector) &&
2008  "Invalid vector kind");
2009  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2010  HasExplicitAmount);
2011  Op->Reg.ElementWidth = ElementWidth;
2012  return Op;
2013  }
2014 
2015  static std::unique_ptr<AArch64Operand>
2016  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2017  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2018  MCContext &Ctx) {
2019  auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2020  Op->VectorList.RegNum = RegNum;
2021  Op->VectorList.Count = Count;
2022  Op->VectorList.NumElements = NumElements;
2023  Op->VectorList.ElementWidth = ElementWidth;
2024  Op->VectorList.RegisterKind = RegisterKind;
2025  Op->StartLoc = S;
2026  Op->EndLoc = E;
2027  return Op;
2028  }
2029 
2030  static std::unique_ptr<AArch64Operand>
2031  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2032  auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2033  Op->VectorIndex.Val = Idx;
2034  Op->StartLoc = S;
2035  Op->EndLoc = E;
2036  return Op;
2037  }
2038 
2039  static std::unique_ptr<AArch64Operand>
2040  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2041  auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2042  Op->MatrixTileList.RegMask = RegMask;
2043  Op->StartLoc = S;
2044  Op->EndLoc = E;
2045  return Op;
2046  }
2047 
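 // SME ZA tiles with narrower elements alias subsets of the eight 64-bit
 // ZAD tiles; expand an (element width, tile) pair to the ZAD registers it
 // overlaps so tile lists can be compared for aliasing.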
2048  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2049  const unsigned ElementWidth) {
2050  static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2051  RegMap = {
2052  {{0, AArch64::ZAB0},
2053  {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2054  AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2055  {{8, AArch64::ZAB0},
2056  {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2057  AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2058  {{16, AArch64::ZAH0},
2059  {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2060  {{16, AArch64::ZAH1},
2061  {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2062  {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2063  {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2064  {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2065  {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2066  };
2067 
2068  if (ElementWidth == 64)
2069  OutRegs.insert(Reg);
2070  else {
2071  std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2072  assert(!Regs.empty() && "Invalid tile or element width!");
2073  for (auto OutReg : Regs)
2074  OutRegs.insert(OutReg);
2075  }
2076  }
2077 
2078  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2079  SMLoc E, MCContext &Ctx) {
2080  auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2081  Op->Imm.Val = Val;
2082  Op->StartLoc = S;
2083  Op->EndLoc = E;
2084  return Op;
2085  }
2086 
2087  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2088  unsigned ShiftAmount,
2089  SMLoc S, SMLoc E,
2090  MCContext &Ctx) {
2091  auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2092  Op->ShiftedImm.Val = Val;
2093  Op->ShiftedImm.ShiftAmount = ShiftAmount;
2094  Op->StartLoc = S;
2095  Op->EndLoc = E;
2096  return Op;
2097  }
2098 
2099  static std::unique_ptr<AArch64Operand>
2100  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2101  auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2102  Op->CondCode.Code = Code;
2103  Op->StartLoc = S;
2104  Op->EndLoc = E;
2105  return Op;
2106  }
2107 
2108  static std::unique_ptr<AArch64Operand>
2109  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2110  auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2111  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2112  Op->FPImm.IsExact = IsExact;
2113  Op->StartLoc = S;
2114  Op->EndLoc = S;
2115  return Op;
2116  }
2117 
2118  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2119  StringRef Str,
2120  SMLoc S,
2121  MCContext &Ctx,
2122  bool HasnXSModifier) {
2123  auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2124  Op->Barrier.Val = Val;
2125  Op->Barrier.Data = Str.data();
2126  Op->Barrier.Length = Str.size();
2127  Op->Barrier.HasnXSModifier = HasnXSModifier;
2128  Op->StartLoc = S;
2129  Op->EndLoc = S;
2130  return Op;
2131  }
2132 
2133  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2134  uint32_t MRSReg,
2135  uint32_t MSRReg,
2136  uint32_t PStateField,
2137  MCContext &Ctx) {
2138  auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2139  Op->SysReg.Data = Str.data();
2140  Op->SysReg.Length = Str.size();
2141  Op->SysReg.MRSReg = MRSReg;
2142  Op->SysReg.MSRReg = MSRReg;
2143  Op->SysReg.PStateField = PStateField;
2144  Op->StartLoc = S;
2145  Op->EndLoc = S;
2146  return Op;
2147  }
2148 
2149  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2150  SMLoc E, MCContext &Ctx) {
2151  auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2152  Op->SysCRImm.Val = Val;
2153  Op->StartLoc = S;
2154  Op->EndLoc = E;
2155  return Op;
2156  }
2157 
2158  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2159  StringRef Str,
2160  SMLoc S,
2161  MCContext &Ctx) {
2162  auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2163  Op->Prefetch.Val = Val;
2164  Op->Barrier.Data = Str.data();
2165  Op->Barrier.Length = Str.size();
2166  Op->StartLoc = S;
2167  Op->EndLoc = S;
2168  return Op;
2169  }
2170 
2171  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2172  StringRef Str,
2173  SMLoc S,
2174  MCContext &Ctx) {
2175  auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2176  Op->PSBHint.Val = Val;
2177  Op->PSBHint.Data = Str.data();
2178  Op->PSBHint.Length = Str.size();
2179  Op->StartLoc = S;
2180  Op->EndLoc = S;
2181  return Op;
2182  }
2183 
2184  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2185  StringRef Str,
2186  SMLoc S,
2187  MCContext &Ctx) {
2188  auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
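 // BTI operands live in the HINT immediate space starting at #32, so the
 // base offset is folded into the stored value here.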
2189  Op->BTIHint.Val = Val | 32;
2190  Op->BTIHint.Data = Str.data();
2191  Op->BTIHint.Length = Str.size();
2192  Op->StartLoc = S;
2193  Op->EndLoc = S;
2194  return Op;
2195  }
2196 
2197  static std::unique_ptr<AArch64Operand>
2198  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2199  SMLoc S, SMLoc E, MCContext &Ctx) {
2200  auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2201  Op->MatrixReg.RegNum = RegNum;
2202  Op->MatrixReg.ElementWidth = ElementWidth;
2203  Op->MatrixReg.Kind = Kind;
2204  Op->StartLoc = S;
2205  Op->EndLoc = E;
2206  return Op;
2207  }
2208 
2209  static std::unique_ptr<AArch64Operand>
2210  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2211  auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2212  Op->SVCR.PStateField = PStateField;
2213  Op->SVCR.Data = Str.data();
2214  Op->SVCR.Length = Str.size();
2215  Op->StartLoc = S;
2216  Op->EndLoc = S;
2217  return Op;
2218  }
2219 
2220  static std::unique_ptr<AArch64Operand>
2221  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2222  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2223  auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2224  Op->ShiftExtend.Type = ShOp;
2225  Op->ShiftExtend.Amount = Val;
2226  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2227  Op->StartLoc = S;
2228  Op->EndLoc = E;
2229  return Op;
2230  }
2231 };
2232 
2233 } // end anonymous namespace.
2234 
2235 void AArch64Operand::print(raw_ostream &OS) const {
2236  switch (Kind) {
2237  case k_FPImm:
2238  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2239  if (!getFPImmIsExact())
2240  OS << " (inexact)";
2241  OS << ">";
2242  break;
2243  case k_Barrier: {
2244  StringRef Name = getBarrierName();
2245  if (!Name.empty())
2246  OS << "<barrier " << Name << ">";
2247  else
2248  OS << "<barrier invalid #" << getBarrier() << ">";
2249  break;
2250  }
2251  case k_Immediate:
2252  OS << *getImm();
2253  break;
2254  case k_ShiftedImm: {
2255  unsigned Shift = getShiftedImmShift();
2256  OS << "<shiftedimm ";
2257  OS << *getShiftedImmVal();
2258  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2259  break;
2260  }
2261  case k_CondCode:
2262  OS << "<condcode " << getCondCode() << ">";
2263  break;
2264  case k_VectorList: {
2265  OS << "<vectorlist ";
2266  unsigned Reg = getVectorListStart();
2267  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2268  OS << Reg + i << " ";
2269  OS << ">";
2270  break;
2271  }
2272  case k_VectorIndex:
2273  OS << "<vectorindex " << getVectorIndex() << ">";
2274  break;
2275  case k_SysReg:
2276  OS << "<sysreg: " << getSysReg() << '>';
2277  break;
2278  case k_Token:
2279  OS << "'" << getToken() << "'";
2280  break;
2281  case k_SysCR:
2282  OS << "c" << getSysCR();
2283  break;
2284  case k_Prefetch: {
2285  StringRef Name = getPrefetchName();
2286  if (!Name.empty())
2287  OS << "<prfop " << Name << ">";
2288  else
2289  OS << "<prfop invalid #" << getPrefetch() << ">";
2290  break;
2291  }
2292  case k_PSBHint:
2293  OS << getPSBHintName();
2294  break;
2295  case k_BTIHint:
2296  OS << getBTIHintName();
2297  break;
2298  case k_MatrixRegister:
2299  OS << "<matrix " << getMatrixReg() << ">";
2300  break;
2301  case k_MatrixTileList: {
2302  OS << "<matrixlist ";
2303  unsigned RegMask = getMatrixTileListRegMask();
2304  unsigned MaxBits = 8;
2305  for (unsigned I = MaxBits; I > 0; --I)
2306  OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2307  OS << '>';
2308  break;
2309  }
2310  case k_SVCR: {
2311  OS << getSVCR();
2312  break;
2313  }
2314  case k_Register:
2315  OS << "<register " << getReg() << ">";
2316  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2317  break;
2318  LLVM_FALLTHROUGH;
2319  case k_ShiftExtend:
2320  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2321  << getShiftExtendAmount();
2322  if (!hasShiftExtendAmount())
2323  OS << "<imp>";
2324  OS << '>';
2325  break;
2326  }
2327 }
2328 
2329 /// @name Auto-generated Match Functions
2330 /// {
2331 
2332 static unsigned MatchRegisterName(StringRef Name);
2333 
2334 /// }
2335 
2336 static unsigned MatchNeonVectorRegName(StringRef Name) {
2337  return StringSwitch<unsigned>(Name.lower())
2338  .Case("v0", AArch64::Q0)
2339  .Case("v1", AArch64::Q1)
2340  .Case("v2", AArch64::Q2)
2341  .Case("v3", AArch64::Q3)
2342  .Case("v4", AArch64::Q4)
2343  .Case("v5", AArch64::Q5)
2344  .Case("v6", AArch64::Q6)
2345  .Case("v7", AArch64::Q7)
2346  .Case("v8", AArch64::Q8)
2347  .Case("v9", AArch64::Q9)
2348  .Case("v10", AArch64::Q10)
2349  .Case("v11", AArch64::Q11)
2350  .Case("v12", AArch64::Q12)
2351  .Case("v13", AArch64::Q13)
2352  .Case("v14", AArch64::Q14)
2353  .Case("v15", AArch64::Q15)
2354  .Case("v16", AArch64::Q16)
2355  .Case("v17", AArch64::Q17)
2356  .Case("v18", AArch64::Q18)
2357  .Case("v19", AArch64::Q19)
2358  .Case("v20", AArch64::Q20)
2359  .Case("v21", AArch64::Q21)
2360  .Case("v22", AArch64::Q22)
2361  .Case("v23", AArch64::Q23)
2362  .Case("v24", AArch64::Q24)
2363  .Case("v25", AArch64::Q25)
2364  .Case("v26", AArch64::Q26)
2365  .Case("v27", AArch64::Q27)
2366  .Case("v28", AArch64::Q28)
2367  .Case("v29", AArch64::Q29)
2368  .Case("v30", AArch64::Q30)
2369  .Case("v31", AArch64::Q31)
2370  .Default(0);
2371 }
2372 
2373 /// Returns an optional pair of (#elements, element-width) if Suffix
2374 /// is a valid vector kind. Where the number of elements in a vector
2375 /// or the vector width is implicit or explicitly unknown (but still a
2376 /// valid suffix kind), 0 is used.
2377 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2378  RegKind VectorKind) {
2379  std::pair<int, int> Res = {-1, -1};
2380 
2381  switch (VectorKind) {
2382  case RegKind::NeonVector:
2383  Res =
2384  StringSwitch<std::pair<int, int>>(Suffix.lower())
2385  .Case("", {0, 0})
2386  .Case(".1d", {1, 64})
2387  .Case(".1q", {1, 128})
2388  // '.2h' needed for fp16 scalar pairwise reductions
2389  .Case(".2h", {2, 16})
2390  .Case(".2s", {2, 32})
2391  .Case(".2d", {2, 64})
2392  // '.4b' is another special case for the ARMv8.2a dot product
2393  // operand
2394  .Case(".4b", {4, 8})
2395  .Case(".4h", {4, 16})
2396  .Case(".4s", {4, 32})
2397  .Case(".8b", {8, 8})
2398  .Case(".8h", {8, 16})
2399  .Case(".16b", {16, 8})
2400  // Accept the width neutral ones, too, for verbose syntax. If those
2401  // aren't used in the right places, the token operand won't match so
2402  // all will work out.
2403  .Case(".b", {0, 8})
2404  .Case(".h", {0, 16})
2405  .Case(".s", {0, 32})
2406  .Case(".d", {0, 64})
2407  .Default({-1, -1});
2408  break;
2409  case RegKind::SVEPredicateVector:
2410  case RegKind::SVEDataVector:
2411  case RegKind::Matrix:
2412  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2413  .Case("", {0, 0})
2414  .Case(".b", {0, 8})
2415  .Case(".h", {0, 16})
2416  .Case(".s", {0, 32})
2417  .Case(".d", {0, 64})
2418  .Case(".q", {0, 128})
2419  .Default({-1, -1});
2420  break;
2421  default:
2422  llvm_unreachable("Unsupported RegKind");
2423  }
2424 
2425  if (Res == std::make_pair(-1, -1))
2426  return Optional<std::pair<int, int>>();
2427 
2428  return Optional<std::pair<int, int>>(Res);
2429 }
2430 
2431 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2432  return parseVectorKind(Suffix, VectorKind).hasValue();
2433 }
2434 
2435 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2436  return StringSwitch<unsigned>(Name.lower())
2437  .Case("z0", AArch64::Z0)
2438  .Case("z1", AArch64::Z1)
2439  .Case("z2", AArch64::Z2)
2440  .Case("z3", AArch64::Z3)
2441  .Case("z4", AArch64::Z4)
2442  .Case("z5", AArch64::Z5)
2443  .Case("z6", AArch64::Z6)
2444  .Case("z7", AArch64::Z7)
2445  .Case("z8", AArch64::Z8)
2446  .Case("z9", AArch64::Z9)
2447  .Case("z10", AArch64::Z10)
2448  .Case("z11", AArch64::Z11)
2449  .Case("z12", AArch64::Z12)
2450  .Case("z13", AArch64::Z13)
2451  .Case("z14", AArch64::Z14)
2452  .Case("z15", AArch64::Z15)
2453  .Case("z16", AArch64::Z16)
2454  .Case("z17", AArch64::Z17)
2455  .Case("z18", AArch64::Z18)
2456  .Case("z19", AArch64::Z19)
2457  .Case("z20", AArch64::Z20)
2458  .Case("z21", AArch64::Z21)
2459  .Case("z22", AArch64::Z22)
2460  .Case("z23", AArch64::Z23)
2461  .Case("z24", AArch64::Z24)
2462  .Case("z25", AArch64::Z25)
2463  .Case("z26", AArch64::Z26)
2464  .Case("z27", AArch64::Z27)
2465  .Case("z28", AArch64::Z28)
2466  .Case("z29", AArch64::Z29)
2467  .Case("z30", AArch64::Z30)
2468  .Case("z31", AArch64::Z31)
2469  .Default(0);
2470 }
2471 
2472 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2473  return StringSwitch<unsigned>(Name.lower())
2474  .Case("p0", AArch64::P0)
2475  .Case("p1", AArch64::P1)
2476  .Case("p2", AArch64::P2)
2477  .Case("p3", AArch64::P3)
2478  .Case("p4", AArch64::P4)
2479  .Case("p5", AArch64::P5)
2480  .Case("p6", AArch64::P6)
2481  .Case("p7", AArch64::P7)
2482  .Case("p8", AArch64::P8)
2483  .Case("p9", AArch64::P9)
2484  .Case("p10", AArch64::P10)
2485  .Case("p11", AArch64::P11)
2486  .Case("p12", AArch64::P12)
2487  .Case("p13", AArch64::P13)
2488  .Case("p14", AArch64::P14)
2489  .Case("p15", AArch64::P15)
2490  .Default(0);
2491 }
2492 
2493 static unsigned matchMatrixTileListRegName(StringRef Name) {
2494  return StringSwitch<unsigned>(Name.lower())
2495  .Case("za0.d", AArch64::ZAD0)
2496  .Case("za1.d", AArch64::ZAD1)
2497  .Case("za2.d", AArch64::ZAD2)
2498  .Case("za3.d", AArch64::ZAD3)
2499  .Case("za4.d", AArch64::ZAD4)
2500  .Case("za5.d", AArch64::ZAD5)
2501  .Case("za6.d", AArch64::ZAD6)
2502  .Case("za7.d", AArch64::ZAD7)
2503  .Case("za0.s", AArch64::ZAS0)
2504  .Case("za1.s", AArch64::ZAS1)
2505  .Case("za2.s", AArch64::ZAS2)
2506  .Case("za3.s", AArch64::ZAS3)
2507  .Case("za0.h", AArch64::ZAH0)
2508  .Case("za1.h", AArch64::ZAH1)
2509  .Case("za0.b", AArch64::ZAB0)
2510  .Default(0);
2511 }
2512 
2513 static unsigned matchMatrixRegName(StringRef Name) {
2514  return StringSwitch<unsigned>(Name.lower())
2515  .Case("za", AArch64::ZA)
2516  .Case("za0.q", AArch64::ZAQ0)
2517  .Case("za1.q", AArch64::ZAQ1)
2518  .Case("za2.q", AArch64::ZAQ2)
2519  .Case("za3.q", AArch64::ZAQ3)
2520  .Case("za4.q", AArch64::ZAQ4)
2521  .Case("za5.q", AArch64::ZAQ5)
2522  .Case("za6.q", AArch64::ZAQ6)
2523  .Case("za7.q", AArch64::ZAQ7)
2524  .Case("za8.q", AArch64::ZAQ8)
2525  .Case("za9.q", AArch64::ZAQ9)
2526  .Case("za10.q", AArch64::ZAQ10)
2527  .Case("za11.q", AArch64::ZAQ11)
2528  .Case("za12.q", AArch64::ZAQ12)
2529  .Case("za13.q", AArch64::ZAQ13)
2530  .Case("za14.q", AArch64::ZAQ14)
2531  .Case("za15.q", AArch64::ZAQ15)
2532  .Case("za0.d", AArch64::ZAD0)
2533  .Case("za1.d", AArch64::ZAD1)
2534  .Case("za2.d", AArch64::ZAD2)
2535  .Case("za3.d", AArch64::ZAD3)
2536  .Case("za4.d", AArch64::ZAD4)
2537  .Case("za5.d", AArch64::ZAD5)
2538  .Case("za6.d", AArch64::ZAD6)
2539  .Case("za7.d", AArch64::ZAD7)
2540  .Case("za0.s", AArch64::ZAS0)
2541  .Case("za1.s", AArch64::ZAS1)
2542  .Case("za2.s", AArch64::ZAS2)
2543  .Case("za3.s", AArch64::ZAS3)
2544  .Case("za0.h", AArch64::ZAH0)
2545  .Case("za1.h", AArch64::ZAH1)
2546  .Case("za0.b", AArch64::ZAB0)
2547  .Case("za0h.q", AArch64::ZAQ0)
2548  .Case("za1h.q", AArch64::ZAQ1)
2549  .Case("za2h.q", AArch64::ZAQ2)
2550  .Case("za3h.q", AArch64::ZAQ3)
2551  .Case("za4h.q", AArch64::ZAQ4)
2552  .Case("za5h.q", AArch64::ZAQ5)
2553  .Case("za6h.q", AArch64::ZAQ6)
2554  .Case("za7h.q", AArch64::ZAQ7)
2555  .Case("za8h.q", AArch64::ZAQ8)
2556  .Case("za9h.q", AArch64::ZAQ9)
2557  .Case("za10h.q", AArch64::ZAQ10)
2558  .Case("za11h.q", AArch64::ZAQ11)
2559  .Case("za12h.q", AArch64::ZAQ12)
2560  .Case("za13h.q", AArch64::ZAQ13)
2561  .Case("za14h.q", AArch64::ZAQ14)
2562  .Case("za15h.q", AArch64::ZAQ15)
2563  .Case("za0h.d", AArch64::ZAD0)
2564  .Case("za1h.d", AArch64::ZAD1)
2565  .Case("za2h.d", AArch64::ZAD2)
2566  .Case("za3h.d", AArch64::ZAD3)
2567  .Case("za4h.d", AArch64::ZAD4)
2568  .Case("za5h.d", AArch64::ZAD5)
2569  .Case("za6h.d", AArch64::ZAD6)
2570  .Case("za7h.d", AArch64::ZAD7)
2571  .Case("za0h.s", AArch64::ZAS0)
2572  .Case("za1h.s", AArch64::ZAS1)
2573  .Case("za2h.s", AArch64::ZAS2)
2574  .Case("za3h.s", AArch64::ZAS3)
2575  .Case("za0h.h", AArch64::ZAH0)
2576  .Case("za1h.h", AArch64::ZAH1)
2577  .Case("za0h.b", AArch64::ZAB0)
2578  .Case("za0v.q", AArch64::ZAQ0)
2579  .Case("za1v.q", AArch64::ZAQ1)
2580  .Case("za2v.q", AArch64::ZAQ2)
2581  .Case("za3v.q", AArch64::ZAQ3)
2582  .Case("za4v.q", AArch64::ZAQ4)
2583  .Case("za5v.q", AArch64::ZAQ5)
2584  .Case("za6v.q", AArch64::ZAQ6)
2585  .Case("za7v.q", AArch64::ZAQ7)
2586  .Case("za8v.q", AArch64::ZAQ8)
2587  .Case("za9v.q", AArch64::ZAQ9)
2588  .Case("za10v.q", AArch64::ZAQ10)
2589  .Case("za11v.q", AArch64::ZAQ11)
2590  .Case("za12v.q", AArch64::ZAQ12)
2591  .Case("za13v.q", AArch64::ZAQ13)
2592  .Case("za14v.q", AArch64::ZAQ14)
2593  .Case("za15v.q", AArch64::ZAQ15)
2594  .Case("za0v.d", AArch64::ZAD0)
2595  .Case("za1v.d", AArch64::ZAD1)
2596  .Case("za2v.d", AArch64::ZAD2)
2597  .Case("za3v.d", AArch64::ZAD3)
2598  .Case("za4v.d", AArch64::ZAD4)
2599  .Case("za5v.d", AArch64::ZAD5)
2600  .Case("za6v.d", AArch64::ZAD6)
2601  .Case("za7v.d", AArch64::ZAD7)
2602  .Case("za0v.s", AArch64::ZAS0)
2603  .Case("za1v.s", AArch64::ZAS1)
2604  .Case("za2v.s", AArch64::ZAS2)
2605  .Case("za3v.s", AArch64::ZAS3)
2606  .Case("za0v.h", AArch64::ZAH0)
2607  .Case("za1v.h", AArch64::ZAH1)
2608  .Case("za0v.b", AArch64::ZAB0)
2609  .Default(0);
2610 }
2611 
2612 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2613  SMLoc &EndLoc) {
2614  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2615 }
2616 
2617 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2618  SMLoc &StartLoc,
2619  SMLoc &EndLoc) {
2620  StartLoc = getLoc();
2621  auto Res = tryParseScalarRegister(RegNo);
2622  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2623  return Res;
2624 }
2625 
2626 // Matches a register name or register alias previously defined by '.req'
2627 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2628  RegKind Kind) {
2629  unsigned RegNum = 0;
2630  if ((RegNum = matchSVEDataVectorRegName(Name)))
2631  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2632 
2633  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2634  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2635 
2636  if ((RegNum = MatchNeonVectorRegName(Name)))
2637  return Kind == RegKind::NeonVector ? RegNum : 0;
2638 
2639  if ((RegNum = matchMatrixRegName(Name)))
2640  return Kind == RegKind::Matrix ? RegNum : 0;
2641 
2642  // The parsed register must be of RegKind Scalar
2643  if ((RegNum = MatchRegisterName(Name)))
2644  return Kind == RegKind::Scalar ? RegNum : 0;
2645 
2646  if (!RegNum) {
2647  // Handle a few common aliases of registers.
2648  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2649  .Case("fp", AArch64::FP)
2650  .Case("lr", AArch64::LR)
2651  .Case("x31", AArch64::XZR)
2652  .Case("w31", AArch64::WZR)
2653  .Default(0))
2654  return Kind == RegKind::Scalar ? RegNum : 0;
2655 
2656  // Check for aliases registered via .req. Canonicalize to lower case.
2657  // That's more consistent since register names are case insensitive, and
2658  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2659  auto Entry = RegisterReqs.find(Name.lower());
2660  if (Entry == RegisterReqs.end())
2661  return 0;
2662 
2663  // Set RegNum if the match is the right kind of register.
2664  if (Kind == Entry->getValue().first)
2665  RegNum = Entry->getValue().second;
2666  }
2667  return RegNum;
2668 }
2669 
2670 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2671 /// Identifier when called, and if it is a register name the token is eaten and
2672 /// the register is added to the operand list.
2673 OperandMatchResultTy
2674 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2675  const AsmToken &Tok = getTok();
2676  if (Tok.isNot(AsmToken::Identifier))
2677  return MatchOperand_NoMatch;
2678 
2679  std::string lowerCase = Tok.getString().lower();
2680  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2681  if (Reg == 0)
2682  return MatchOperand_NoMatch;
2683 
2684  RegNum = Reg;
2685  Lex(); // Eat identifier token.
2686  return MatchOperand_Success;
2687 }
2688 
2689 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2690 OperandMatchResultTy
2691 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2692  SMLoc S = getLoc();
2693 
2694  if (getTok().isNot(AsmToken::Identifier)) {
2695  Error(S, "Expected cN operand where 0 <= N <= 15");
2696  return MatchOperand_ParseFail;
2697  }
2698 
2699  StringRef Tok = getTok().getIdentifier();
2700  if (Tok[0] != 'c' && Tok[0] != 'C') {
2701  Error(S, "Expected cN operand where 0 <= N <= 15");
2702  return MatchOperand_ParseFail;
2703  }
2704 
2705  uint32_t CRNum;
2706  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2707  if (BadNum || CRNum > 15) {
2708  Error(S, "Expected cN operand where 0 <= N <= 15");
2709  return MatchOperand_ParseFail;
2710  }
2711 
2712  Lex(); // Eat identifier token.
2713  Operands.push_back(
2714  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2715  return MatchOperand_Success;
2716 }
2717 
2718 /// tryParsePrefetch - Try to parse a prefetch operand.
2719 template <bool IsSVEPrefetch>
2720 OperandMatchResultTy
2721 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2722  SMLoc S = getLoc();
2723  const AsmToken &Tok = getTok();
2724 
2725  auto LookupByName = [](StringRef N) {
2726  if (IsSVEPrefetch) {
2727  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2728  return Optional<unsigned>(Res->Encoding);
2729  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2730  return Optional<unsigned>(Res->Encoding);
2731  return Optional<unsigned>();
2732  };
2733 
2734  auto LookupByEncoding = [](unsigned E) {
2735  if (IsSVEPrefetch) {
2736  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2737  return Optional<StringRef>(Res->Name);
2738  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2739  return Optional<StringRef>(Res->Name);
2740  return Optional<StringRef>();
2741  };
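 // SVE prefetch operands are a 4-bit immediate; regular PRFM operands are
 // 5-bit.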
2742  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2743 
2744  // Either an identifier for named values or a 5-bit immediate.
2745  // Eat optional hash.
2746  if (parseOptionalToken(AsmToken::Hash) ||
2747  Tok.is(AsmToken::Integer)) {
2748  const MCExpr *ImmVal;
2749  if (getParser().parseExpression(ImmVal))
2750  return MatchOperand_ParseFail;
2751 
2752  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2753  if (!MCE) {
2754  TokError("immediate value expected for prefetch operand");
2755  return MatchOperand_ParseFail;
2756  }
2757  unsigned prfop = MCE->getValue();
2758  if (prfop > MaxVal) {
2759  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2760  "] expected");
2761  return MatchOperand_ParseFail;
2762  }
2763 
2764  auto PRFM = LookupByEncoding(MCE->getValue());
2765  Operands.push_back(AArch64Operand::CreatePrefetch(
2766  prfop, PRFM.getValueOr(""), S, getContext()));
2767  return MatchOperand_Success;
2768  }
2769 
2770  if (Tok.isNot(AsmToken::Identifier)) {
2771  TokError("prefetch hint expected");
2772  return MatchOperand_ParseFail;
2773  }
2774 
2775  auto PRFM = LookupByName(Tok.getString());
2776  if (!PRFM) {
2777  TokError("prefetch hint expected");
2778  return MatchOperand_ParseFail;
2779  }
2780 
2781  Operands.push_back(AArch64Operand::CreatePrefetch(
2782  *PRFM, Tok.getString(), S, getContext()));
2783  Lex(); // Eat identifier token.
2784  return MatchOperand_Success;
2785 }
2786 
2787 /// tryParsePSBHint - Try to parse a PSB operand, mapped to a HINT instruction.
2788 OperandMatchResultTy
2789 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2790  SMLoc S = getLoc();
2791  const AsmToken &Tok = getTok();
2792  if (Tok.isNot(AsmToken::Identifier)) {
2793  TokError("invalid operand for instruction");
2794  return MatchOperand_ParseFail;
2795  }
2796 
2797  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2798  if (!PSB) {
2799  TokError("invalid operand for instruction");
2800  return MatchOperand_ParseFail;
2801  }
2802 
2803  Operands.push_back(AArch64Operand::CreatePSBHint(
2804  PSB->Encoding, Tok.getString(), S, getContext()));
2805  Lex(); // Eat identifier token.
2806  return MatchOperand_Success;
2807 }
2808 
2809 /// tryParseBTIHint - Try to parse a BTI operand, mapped to a HINT instruction.
2810 OperandMatchResultTy
2811 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2812  SMLoc S = getLoc();
2813  const AsmToken &Tok = getTok();
2814  if (Tok.isNot(AsmToken::Identifier)) {
2815  TokError("invalid operand for instruction");
2816  return MatchOperand_ParseFail;
2817  }
2818 
2819  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2820  if (!BTI) {
2821  TokError("invalid operand for instruction");
2822  return MatchOperand_ParseFail;
2823  }
2824 
2825  Operands.push_back(AArch64Operand::CreateBTIHint(
2826  BTI->Encoding, Tok.getString(), S, getContext()));
2827  Lex(); // Eat identifier token.
2828  return MatchOperand_Success;
2829 }
2830 
2831 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2832 /// instruction.
2833 OperandMatchResultTy
2834 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2835  SMLoc S = getLoc();
2836  const MCExpr *Expr = nullptr;
2837 
2838  if (getTok().is(AsmToken::Hash)) {
2839  Lex(); // Eat hash token.
2840  }
2841 
2842  if (parseSymbolicImmVal(Expr))
2843  return MatchOperand_ParseFail;
2844 
2845  AArch64MCExpr::VariantKind ELFRefKind;
2846  MCSymbolRefExpr::VariantKind DarwinRefKind;
2847  int64_t Addend;
2848  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2849  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2850  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2851  // No modifier was specified at all; this is the syntax for an ELF basic
2852  // ADRP relocation (unfortunately).
2853  Expr =
2854  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2855  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2856  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2857  Addend != 0) {
2858  Error(S, "gotpage label reference not allowed an addend");
2859  return MatchOperand_ParseFail;
2860  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2861  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2862  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2863  ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2864  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2865  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2866  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2867  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2868  // The operand must be an @page or @gotpage qualified symbolref.
2869  Error(S, "page or gotpage label reference expected");
2870  return MatchOperand_ParseFail;
2871  }
2872  }
2873 
2874  // We have either a label reference possibly with addend or an immediate. The
2875  // addend is a raw value here. The linker will adjust it to only reference the
2876  // page.
2877  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2878  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2879 
2880  return MatchOperand_Success;
2881 }
2882 
2883 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2884 /// instruction.
2885 OperandMatchResultTy
2886 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2887  SMLoc S = getLoc();
2888  const MCExpr *Expr = nullptr;
2889 
2890  // Leave anything with a bracket to the default for SVE
2891  if (getTok().is(AsmToken::LBrac))
2892  return MatchOperand_NoMatch;
2893 
2894  if (getTok().is(AsmToken::Hash))
2895  Lex(); // Eat hash token.
2896 
2897  if (parseSymbolicImmVal(Expr))
2898  return MatchOperand_ParseFail;
2899 
2900  AArch64MCExpr::VariantKind ELFRefKind;
2901  MCSymbolRefExpr::VariantKind DarwinRefKind;
2902  int64_t Addend;
2903  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2904  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2905  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2906  // No modifier was specified at all; this is the syntax for an ELF basic
2907  // ADR relocation (unfortunately).
2908  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2909  } else {
2910  Error(S, "unexpected adr label");
2911  return MatchOperand_ParseFail;
2912  }
2913  }
2914 
2915  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2916  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2917  return MatchOperand_Success;
2918 }
2919 
2920 /// tryParseFPImm - A floating point immediate expression operand.
2921 template<bool AddFPZeroAsLiteral>
2922 OperandMatchResultTy
2923 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2924  SMLoc S = getLoc();
2925 
2926  bool Hash = parseOptionalToken(AsmToken::Hash);
2927 
2928  // Handle negation, as that still comes through as a separate token.
2929  bool isNegative = parseOptionalToken(AsmToken::Minus);
2930 
2931  const AsmToken &Tok = getTok();
2932  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2933  if (!Hash)
2934  return MatchOperand_NoMatch;
2935  TokError("invalid floating point immediate");
2936  return MatchOperand_ParseFail;
2937  }
2938 
2939  // Parse hexadecimal representation.
2940  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2941  if (Tok.getIntVal() > 255 || isNegative) {
2942  TokError("encoded floating point value out of range");
2943  return MatchOperand_ParseFail;
2944  }
2945 
2946  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2947  Operands.push_back(
2948  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2949  } else {
2950  // Parse FP representation.
2951  APFloat RealVal(APFloat::IEEEdouble());
2952  auto StatusOrErr =
2953  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2954  if (errorToBool(StatusOrErr.takeError())) {
2955  TokError("invalid floating point representation");
2956  return MatchOperand_ParseFail;
2957  }
2958 
2959  if (isNegative)
2960  RealVal.changeSign();
2961 
2962  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2963  Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
2964  Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
2965  } else
2966  Operands.push_back(AArch64Operand::CreateFPImm(
2967  RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2968  }
2969 
2970  Lex(); // Eat the token.
2971 
2972  return MatchOperand_Success;
2973 }
2974 
2975 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2976 /// a shift suffix, for example '#1, lsl #12'.
2977 OperandMatchResultTy
2978 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2979  SMLoc S = getLoc();
2980 
2981  if (getTok().is(AsmToken::Hash))
2982  Lex(); // Eat '#'
2983  else if (getTok().isNot(AsmToken::Integer))
2984  // The operand must start with '#' or be an integer; otherwise there is
2984  // no match.
2985  return MatchOperand_NoMatch;
2986 
2987  const MCExpr *Imm = nullptr;
2988  if (parseSymbolicImmVal(Imm))
2989  return MatchOperand_ParseFail;
2990  else if (getTok().isNot(AsmToken::Comma)) {
2991  Operands.push_back(
2992  AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2993  return MatchOperand_Success;
2994  }
2995 
2996  // Eat ','
2997  Lex();
2998 
2999  // The optional operand must be "lsl #N" where N is non-negative.
3000  if (!getTok().is(AsmToken::Identifier) ||
3001  !getTok().getIdentifier().equals_insensitive("lsl")) {
3002  Error(getLoc(), "only 'lsl #+N' valid after immediate");
3003  return MatchOperand_ParseFail;
3004  }
3005 
3006  // Eat 'lsl'
3007  Lex();
3008 
3009  parseOptionalToken(AsmToken::Hash);
3010 
3011  if (getTok().isNot(AsmToken::Integer)) {
3012  Error(getLoc(), "only 'lsl #+N' valid after immediate");
3013  return MatchOperand_ParseFail;
3014  }
3015 
3016  int64_t ShiftAmount = getTok().getIntVal();
3017 
3018  if (ShiftAmount < 0) {
3019  Error(getLoc(), "positive shift amount required");
3020  return MatchOperand_ParseFail;
3021  }
3022  Lex(); // Eat the number
3023 
3024  // Just in case the optional lsl #0 is used for immediates other than zero.
3025  if (ShiftAmount == 0 && Imm != nullptr) {
3026  Operands.push_back(
3027  AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3028  return MatchOperand_Success;
3029  }
3030 
3031  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3032  getLoc(), getContext()));
3033  return MatchOperand_Success;
3034 }
3035 
3036 /// parseCondCodeString - Parse a Condition Code string.
3037 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3038  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3039  .Case("eq", AArch64CC::EQ)
3040  .Case("ne", AArch64CC::NE)
3041  .Case("cs", AArch64CC::HS)
3042  .Case("hs", AArch64CC::HS)
3043  .Case("cc", AArch64CC::LO)
3044  .Case("lo", AArch64CC::LO)
3045  .Case("mi", AArch64CC::MI)
3046  .Case("pl", AArch64CC::PL)
3047  .Case("vs", AArch64CC::VS)
3048  .Case("vc", AArch64CC::VC)
3049  .Case("hi", AArch64CC::HI)
3050  .Case("ls", AArch64CC::LS)
3051  .Case("ge", AArch64CC::GE)
3052  .Case("lt", AArch64CC::LT)
3053  .Case("gt", AArch64CC::GT)
3054  .Case("le", AArch64CC::LE)
3055  .Case("al", AArch64CC::AL)
3056  .Case("nv", AArch64CC::NV)
3058 
3059  if (CC == AArch64CC::Invalid &&
3060  getSTI().getFeatureBits()[AArch64::FeatureSVE])
3061  CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3062  .Case("none", AArch64CC::EQ)
3063  .Case("any", AArch64CC::NE)
3064  .Case("nlast", AArch64CC::HS)
3065  .Case("last", AArch64CC::LO)
3066  .Case("first", AArch64CC::MI)
3067  .Case("nfrst", AArch64CC::PL)
3068  .Case("pmore", AArch64CC::HI)
3069  .Case("plast", AArch64CC::LS)
3070  .Case("tcont", AArch64CC::GE)
3071  .Case("tstop", AArch64CC::LT)
3073 
3074  return CC;
3075 }
3076 
3077 /// parseCondCode - Parse a Condition Code operand.
3078 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3079  bool invertCondCode) {
3080  SMLoc S = getLoc();
3081  const AsmToken &Tok = getTok();
3082  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3083 
3084  StringRef Cond = Tok.getString();
3085  AArch64CC::CondCode CC = parseCondCodeString(Cond);
3086  if (CC == AArch64CC::Invalid)
3087  return TokError("invalid condition code");
3088  Lex(); // Eat identifier token.
3089 
3090  if (invertCondCode) {
3091  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3092  return TokError("condition codes AL and NV are invalid for this instruction");
3093  CC = AArch64CC::getInvertedCondCode(CC);
3094  }
3095 
3096  Operands.push_back(
3097  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3098  return false;
3099 }
3100 
3101 OperandMatchResultTy
3102 AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3103  const AsmToken &Tok = getTok();
3104  SMLoc S = getLoc();
3105 
3106  if (Tok.isNot(AsmToken::Identifier)) {
3107  TokError("invalid operand for instruction");
3108  return MatchOperand_ParseFail;
3109  }
3110 
3111  unsigned PStateImm = -1;
3112  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3113  if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3114  PStateImm = SVCR->Encoding;
3115 
3116  Operands.push_back(
3117  AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3118  Lex(); // Eat identifier token.
3119  return MatchOperand_Success;
3120 }
3121 
3122 OperandMatchResultTy
3123 AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3124  const AsmToken &Tok = getTok();
3125  SMLoc S = getLoc();
3126 
3127  StringRef Name = Tok.getString();
3128 
3129  if (Name.equals_insensitive("za")) {
3130  Lex(); // eat "za"
3131  Operands.push_back(AArch64Operand::CreateMatrixRegister(
3132  AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3133  getContext()));
3134  if (getLexer().is(AsmToken::LBrac)) {
3135  // There's no comma after matrix operand, so we can parse the next operand
3136  // immediately.
3137  if (parseOperand(Operands, false, false))
3138  return MatchOperand_NoMatch;
3139  }
3140  return MatchOperand_Success;
3141  }
3142 
3143  // Try to parse matrix register.
3144  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3145  if (!Reg)
3146  return MatchOperand_NoMatch;
3147 
3148  size_t DotPosition = Name.find('.');
3149  assert(DotPosition != StringRef::npos && "Unexpected register");
3150 
3151  StringRef Head = Name.take_front(DotPosition);
3152  StringRef Tail = Name.drop_front(DotPosition);
3153  StringRef RowOrColumn = Head.take_back();
3154 
3155  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3156  .Case("h", MatrixKind::Row)
3157  .Case("v", MatrixKind::Col)
3158  .Default(MatrixKind::Tile);
3159 
3160  // Next up, parsing the suffix
3161  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3162  if (!KindRes) {
3163  TokError("Expected the register to be followed by element width suffix");
3164  return MatchOperand_ParseFail;
3165  }
3166  unsigned ElementWidth = KindRes->second;
3167 
3168  Lex();
3169 
3170  Operands.push_back(AArch64Operand::CreateMatrixRegister(
3171  Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3172 
3173  if (getLexer().is(AsmToken::LBrac)) {
3174  // There's no comma after matrix operand, so we can parse the next operand
3175  // immediately.
3176  if (parseOperand(Operands, false, false))
3177  return MatchOperand_NoMatch;
3178  }
3179  return MatchOperand_Success;
3180 }
3181 
3182 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
3183 /// extend argument. Parse them if present.
3184 OperandMatchResultTy
3185 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3186  const AsmToken &Tok = getTok();
3187  std::string LowerID = Tok.getString().lower();
3188  AArch64_AM::ShiftExtendType ShOp =
3189  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3190  .Case("lsl", AArch64_AM::LSL)
3191  .Case("lsr", AArch64_AM::LSR)
3192  .Case("asr", AArch64_AM::ASR)
3193  .Case("ror", AArch64_AM::ROR)
3194  .Case("msl", AArch64_AM::MSL)
3195  .Case("uxtb", AArch64_AM::UXTB)
3196  .Case("uxth", AArch64_AM::UXTH)
3197  .Case("uxtw", AArch64_AM::UXTW)
3198  .Case("uxtx", AArch64_AM::UXTX)
3199  .Case("sxtb", AArch64_AM::SXTB)
3200  .Case("sxth", AArch64_AM::SXTH)
3201  .Case("sxtw", AArch64_AM::SXTW)
3202  .Case("sxtx", AArch64_AM::SXTX)
3204 
3205  if (ShOp == AArch64_AM::InvalidShiftExtend)
3206  return MatchOperand_NoMatch;
3207 
3208  SMLoc S = Tok.getLoc();
3209  Lex();
3210 
3211  bool Hash = parseOptionalToken(AsmToken::Hash);
3212 
3213  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3214  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3215  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3216  ShOp == AArch64_AM::MSL) {
3217  // We expect a number here.
3218  TokError("expected #imm after shift specifier");
3219  return MatchOperand_ParseFail;
3220  }
3221 
3222  // "extend" type operations don't need an immediate, #0 is implicit.
3223  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3224  Operands.push_back(
3225  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3226  return MatchOperand_Success;
3227  }
3228 
3229  // Make sure we do actually have a number, identifier or a parenthesized
3230  // expression.
3231  SMLoc E = getLoc();
3232  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3233  !getTok().is(AsmToken::Identifier)) {
3234  Error(E, "expected integer shift amount");
3235  return MatchOperand_ParseFail;
3236  }
3237 
3238  const MCExpr *ImmVal;
3239  if (getParser().parseExpression(ImmVal))
3240  return MatchOperand_ParseFail;
3241 
3242  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3243  if (!MCE) {
3244  Error(E, "expected constant '#imm' after shift specifier");
3245  return MatchOperand_ParseFail;
3246  }
3247 
3248  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3249  Operands.push_back(AArch64Operand::CreateShiftExtend(
3250  ShOp, MCE->getValue(), true, S, E, getContext()));
3251  return MatchOperand_Success;
3252 }
3253 
3254 static const struct Extension {
3255  const char *Name;
3256  const FeatureBitset Features;
3257 } ExtensionMap[] = {
3258  {"crc", {AArch64::FeatureCRC}},
3259  {"sm4", {AArch64::FeatureSM4}},
3260  {"sha3", {AArch64::FeatureSHA3}},
3261  {"sha2", {AArch64::FeatureSHA2}},
3262  {"aes", {AArch64::FeatureAES}},
3263  {"crypto", {AArch64::FeatureCrypto}},
3264  {"fp", {AArch64::FeatureFPARMv8}},
3265  {"simd", {AArch64::FeatureNEON}},
3266  {"ras", {AArch64::FeatureRAS}},
3267  {"lse", {AArch64::FeatureLSE}},
3268  {"predres", {AArch64::FeaturePredRes}},
3269  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3270  {"mte", {AArch64::FeatureMTE}},
3271  {"memtag", {AArch64::FeatureMTE}},
3272  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3273  {"pan", {AArch64::FeaturePAN}},
3274  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3275  {"ccpp", {AArch64::FeatureCCPP}},
3276  {"rcpc", {AArch64::FeatureRCPC}},
3277  {"rng", {AArch64::FeatureRandGen}},
3278  {"sve", {AArch64::FeatureSVE}},
3279  {"sve2", {AArch64::FeatureSVE2}},
3280  {"sve2-aes", {AArch64::FeatureSVE2AES}},
3281  {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3282  {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3283  {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3284  {"ls64", {AArch64::FeatureLS64}},
3285  {"xs", {AArch64::FeatureXS}},
3286  {"pauth", {AArch64::FeaturePAuth}},
3287  {"flagm", {AArch64::FeatureFlagM}},
3288  {"rme", {AArch64::FeatureRME}},
3289  {"sme", {AArch64::FeatureSME}},
3290  {"sme-f64", {AArch64::FeatureSMEF64}},
3291  {"sme-i64", {AArch64::FeatureSMEI64}},
3292  // FIXME: Unsupported extensions
3293  {"lor", {}},
3294  {"rdma", {}},
3295  {"profile", {}},
3296 };
3297 
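 // Render a feature set as a human-readable requirement string: prefer the
 // base architecture level; otherwise list the matching extension names.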
3298 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3299  if (FBS[AArch64::HasV8_1aOps])
3300  Str += "ARMv8.1a";
3301  else if (FBS[AArch64::HasV8_2aOps])
3302  Str += "ARMv8.2a";
3303  else if (FBS[AArch64::HasV8_3aOps])
3304  Str += "ARMv8.3a";
3305  else if (FBS[AArch64::HasV8_4aOps])
3306  Str += "ARMv8.4a";
3307  else if (FBS[AArch64::HasV8_5aOps])
3308  Str += "ARMv8.5a";
3309  else if (FBS[AArch64::HasV8_6aOps])
3310  Str += "ARMv8.6a";
3311  else if (FBS[AArch64::HasV8_7aOps])
3312  Str += "ARMv8.7a";
3313  else {
3314  SmallVector<std::string, 2> ExtMatches;
3315  for (const auto& Ext : ExtensionMap) {
3316  // Use & in case multiple features are enabled
3317  if ((FBS & Ext.Features) != FeatureBitset())
3318  ExtMatches.push_back(Ext.Name);
3319  }
3320  Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3321  }
3322 }
3323 
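 // Unpack a SYS alias encoding, laid out as op1[13:11] Cn[10:7] Cm[6:3]
 // op2[2:0], into the four explicit operands of the underlying SYS
 // instruction.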
3324 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3325  SMLoc S) {
3326  const uint16_t Op2 = Encoding & 7;
3327  const uint16_t Cm = (Encoding & 0x78) >> 3;
3328  const uint16_t Cn = (Encoding & 0x780) >> 7;
3329  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3330 
3331  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3332 
3333  Operands.push_back(
3334  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3335  Operands.push_back(
3336  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3337  Operands.push_back(
3338  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3339  Expr = MCConstantExpr::create(Op2, getContext());
3340  Operands.push_back(
3341  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3342 }
3343 
3344 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3345 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3346 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3347  OperandVector &Operands) {
3348  if (Name.find('.') != StringRef::npos)
3349  return TokError("invalid operand");
3350 
3351  Mnemonic = Name;
3352  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3353 
3354  const AsmToken &Tok = getTok();
3355  StringRef Op = Tok.getString();
3356  SMLoc S = Tok.getLoc();
3357 
3358  if (Mnemonic == "ic") {
3359  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3360  if (!IC)
3361  return TokError("invalid operand for IC instruction");
3362  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3363  std::string Str("IC " + std::string(IC->Name) + " requires: ");
3365  return TokError(Str);
3366  }
3367  createSysAlias(IC->Encoding, Operands, S);
3368  } else if (Mnemonic == "dc") {
3369  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3370  if (!DC)
3371  return TokError("invalid operand for DC instruction");
3372  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3373  std::string Str("DC " + std::string(DC->Name) + " requires: ");
3374  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3375  return TokError(Str);
3376  }
3377  createSysAlias(DC->Encoding, Operands, S);
3378  } else if (Mnemonic == "at") {
3379  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3380  if (!AT)
3381  return TokError("invalid operand for AT instruction");
3382  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3383  std::string Str("AT " + std::string(AT->Name) + " requires: ");
3385  return TokError(Str);
3386  }
3387  createSysAlias(AT->Encoding, Operands, S);
3388  } else if (Mnemonic == "tlbi") {
3389  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3390  if (!TLBI)
3391  return TokError("invalid operand for TLBI instruction");
3392  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3393  std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3395  return TokError(Str);
3396  }
3397  createSysAlias(TLBI->Encoding, Operands, S);
3398  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3399  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3400  if (!PRCTX)
3401  return TokError("invalid operand for prediction restriction instruction");
3402  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3403  std::string Str(
3404  Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3405  setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3406  return TokError(Str);
3407  }
3408  uint16_t PRCTX_Op2 =
3409  Mnemonic == "cfp" ? 4 :
3410  Mnemonic == "dvp" ? 5 :
3411  Mnemonic == "cpp" ? 7 :
3412  0;
3413  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3414  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
3415  }
3416 
3417  Lex(); // Eat operand.
3418 
3419  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3420  bool HasRegister = false;
3421 
3422  // Check for the optional register operand.
3423  if (parseOptionalToken(AsmToken::Comma)) {
3424  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3425  return TokError("expected register operand");
3426  HasRegister = true;
3427  }
3428 
3429  if (ExpectRegister && !HasRegister)
3430  return TokError("specified " + Mnemonic + " op requires a register");
3431  else if (!ExpectRegister && HasRegister)
3432  return TokError("specified " + Mnemonic + " op does not use a register");
3433 
3434  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3435  return true;
3436 
3437  return false;
3438 }
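// Illustrative examples (not from the original source): the operand name
// decides whether a register is expected, via the "all" substring check:
//   tlbi vmalle1        // no register allowed ("all" in the operand name)
//   tlbi vae1, x0       // register required
//   tlbi vae1           // error: specified tlbi op requires a register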
3439 
3440 OperandMatchResultTy
3441 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3442  MCAsmParser &Parser = getParser();
3443  const AsmToken &Tok = getTok();
3444 
3445  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3446  TokError("'csync' operand expected");
3447  return MatchOperand_ParseFail;
3448  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3449  // Immediate operand.
3450  const MCExpr *ImmVal;
3451  SMLoc ExprLoc = getLoc();
3452  AsmToken IntTok = Tok;
3453  if (getParser().parseExpression(ImmVal))
3454  return MatchOperand_ParseFail;
3455  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3456  if (!MCE) {
3457  Error(ExprLoc, "immediate value expected for barrier operand");
3458  return MatchOperand_ParseFail;
3459  }
3460  int64_t Value = MCE->getValue();
3461  if (Mnemonic == "dsb" && Value > 15) {
3462  // This case is no match here, but it might be matched by the nXS
3463  // variant. We deliberately do not unlex the optional '#', as it is not
3464  // needed to characterize an integer immediate.
3465  Parser.getLexer().UnLex(IntTok);
3466  return MatchOperand_NoMatch;
3467  }
3468  if (Value < 0 || Value > 15) {
3469  Error(ExprLoc, "barrier operand out of range");
3470  return MatchOperand_ParseFail;
3471  }
3472  auto DB = AArch64DB::lookupDBByEncoding(Value);
3473  Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3474  ExprLoc, getContext(),
3475  false /*hasnXSModifier*/));
3476  return MatchOperand_Success;
3477  }
3478 
3479  if (Tok.isNot(AsmToken::Identifier)) {
3480  TokError("invalid operand for instruction");
3481  return MatchOperand_ParseFail;
3482  }
3483 
3484  StringRef Operand = Tok.getString();
3485  auto TSB = AArch64TSB::lookupTSBByName(Operand);
3486  auto DB = AArch64DB::lookupDBByName(Operand);
3487  // The only valid named option for ISB is 'sy'
3488  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3489  TokError("'sy' or #imm operand expected");
3490  return MatchOperand_ParseFail;
3491  // The only valid named option for TSB is 'csync'
3492  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3493  TokError("'csync' operand expected");
3494  return MatchOperand_ParseFail;
3495  } else if (!DB && !TSB) {
3496  if (Mnemonic == "dsb") {
3497  // This case is a no match here, but it might be matched by the nXS
3498  // variant.
3499  return MatchOperand_NoMatch;
3500  }
3501  TokError("invalid barrier option name");
3502  return MatchOperand_ParseFail;
3503  }
3504 
3505  Operands.push_back(AArch64Operand::CreateBarrier(
3506  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3507  getContext(), false /*hasnXSModifier*/));
3508  Lex(); // Consume the option
3509 
3510  return MatchOperand_Success;
3511 }
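// Illustrative examples (not from the original source): "dsb sy" and
// "dsb #15" both produce barrier operand 15, while "dsb #16" is deliberately
// returned as NoMatch above so the nXS-variant parser below can claim it.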
3512 
3513 OperandMatchResultTy
3514 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3515  const AsmToken &Tok = getTok();
3516 
3517  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3518  if (Mnemonic != "dsb")
3519  return MatchOperand_ParseFail;
3520 
3521  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3522  // Immediate operand.
3523  const MCExpr *ImmVal;
3524  SMLoc ExprLoc = getLoc();
3525  if (getParser().parseExpression(ImmVal))
3526  return MatchOperand_ParseFail;
3527  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3528  if (!MCE) {
3529  Error(ExprLoc, "immediate value expected for barrier operand");
3530  return MatchOperand_ParseFail;
3531  }
3532  int64_t Value = MCE->getValue();
3533  // v8.7-A DSB in the nXS variant accepts only the following immediate
3534  // values: 16, 20, 24, 28.
3535  if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3536  Error(ExprLoc, "barrier operand out of range");
3537  return MatchOperand_ParseFail;
3538  }
3539  auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3540  Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3541  ExprLoc, getContext(),
3542  true /*hasnXSModifier*/));
3543  return MatchOperand_Success;
3544  }
3545 
3546  if (Tok.isNot(AsmToken::Identifier)) {
3547  TokError("invalid operand for instruction");
3548  return MatchOperand_ParseFail;
3549  }
3550 
3551  StringRef Operand = Tok.getString();
3552  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3553 
3554  if (!DB) {
3555  TokError("invalid barrier option name");
3556  return MatchOperand_ParseFail;
3557  }
3558 
3559  Operands.push_back(
3560  AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3561  getContext(), true /*hasnXSModifier*/));
3562  Lex(); // Consume the option
3563 
3564  return MatchOperand_Success;
3565 }
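// Illustrative example (not from the original source): "dsb synxs" and
// "dsb #28" should produce the same nXS barrier operand, 28 being the
// immediate value associated with the SY nXS barrier variant.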
3566 
3567 OperandMatchResultTy
3568 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3569  const AsmToken &Tok = getTok();
3570 
3571  if (Tok.isNot(AsmToken::Identifier))
3572  return MatchOperand_NoMatch;
3573 
3574  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3575  return MatchOperand_NoMatch;
3576 
3577  int MRSReg, MSRReg;
3578  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3579  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3580  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3581  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3582  } else
3583  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3584 
3585  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3586  unsigned PStateImm = -1;
3587  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3588  PStateImm = PState->Encoding;
3589 
3590  Operands.push_back(
3591  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3592  PStateImm, getContext()));
3593  Lex(); // Eat identifier
3594 
3595  return MatchOperand_Success;
3596 }
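// Illustrative example (not from the original source): for
// "mrs x0, ttbr0_el1" the lookup above yields a readable and writeable
// system register, so both MRSReg and MSRReg hold its encoding; a read-only
// register such as midr_el1 would leave MSRReg at -1.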
3597 
3598 /// tryParseNeonVectorRegister - Parse a vector register operand.
3599 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3600  if (getTok().isNot(AsmToken::Identifier))
3601  return true;
3602 
3603  SMLoc S = getLoc();
3604  // Check for a vector register specifier first.
3605  StringRef Kind;
3606  unsigned Reg;
3607  OperandMatchResultTy Res =
3608  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3609  if (Res != MatchOperand_Success)
3610  return true;
3611 
3612  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3613  if (!KindRes)
3614  return true;
3615 
3616  unsigned ElementWidth = KindRes->second;
3617  Operands.push_back(
3618  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3619  S, getLoc(), getContext()));
3620 
3621  // If there was an explicit qualifier, that goes on as a literal text
3622  // operand.
3623  if (!Kind.empty())
3624  Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3625 
3626  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3627 }
3628 
3629 OperandMatchResultTy
3630 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3631  SMLoc SIdx = getLoc();
3632  if (parseOptionalToken(AsmToken::LBrac)) {
3633  const MCExpr *ImmVal;
3634  if (getParser().parseExpression(ImmVal))
3635  return MatchOperand_NoMatch;
3636  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3637  if (!MCE) {
3638  TokError("immediate value expected for vector index");
3639  return MatchOperand_ParseFail;
3640  }
3641 
3642  SMLoc E = getLoc();
3643 
3644  if (parseToken(AsmToken::RBrac, "']' expected"))
3645  return MatchOperand_ParseFail;
3646 
3647  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3648  E, getContext()));
3649  return MatchOperand_Success;
3650  }
3651 
3652  return MatchOperand_NoMatch;
3653 }
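// Illustrative example (not from the original source): in "mov x0, v0.d[1]"
// the trailing "[1]" is folded by tryParseVectorIndex into a single
// vector-index operand with value 1.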
3654 
3655 // tryParseVectorRegister - Try to parse a vector register name with
3656 // optional kind specifier. If it is a register specifier, eat the token
3657 // and return it.
3658 OperandMatchResultTy
3659 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3660  RegKind MatchKind) {
3661  const AsmToken &Tok = getTok();
3662 
3663  if (Tok.isNot(AsmToken::Identifier))
3664  return MatchOperand_NoMatch;
3665 
3666  StringRef Name = Tok.getString();
3667  // If there is a kind specifier, it's separated from the register name by
3668  // a '.'.
3669  size_t Start = 0, Next = Name.find('.');
3670  StringRef Head = Name.slice(Start, Next);
3671  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3672 
3673  if (RegNum) {
3674  if (Next != StringRef::npos) {
3675  Kind = Name.slice(Next, StringRef::npos);
3676  if (!isValidVectorKind(Kind, MatchKind)) {
3677  TokError("invalid vector kind qualifier");
3678  return MatchOperand_ParseFail;
3679  }
3680  }
3681  Lex(); // Eat the register token.
3682 
3683  Reg = RegNum;
3684  return MatchOperand_Success;
3685  }
3686 
3687  return MatchOperand_NoMatch;
3688 }
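// Illustrative example (not from the original source): "v0.8b" is split at
// the '.' into the register name "v0" and the kind suffix ".8b"; the suffix
// is then validated separately via isValidVectorKind.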
3689 
3690 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3691 OperandMatchResultTy
3692 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3693  // Check for a SVE predicate register specifier first.
3694  const SMLoc S = getLoc();
3695  StringRef Kind;
3696  unsigned RegNum;
3697  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3698  if (Res != MatchOperand_Success)
3699  return Res;
3700 
3701  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3702  if (!KindRes)
3703  return MatchOperand_NoMatch;
3704 
3705  unsigned ElementWidth = KindRes->second;
3706  Operands.push_back(AArch64Operand::CreateVectorReg(
3707  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3708  getLoc(), getContext()));
3709 
3710  if (getLexer().is(AsmToken::LBrac)) {
3711  // Indexed predicate: there's no comma, so try to parse the next operand
3712  // immediately.
3713  if (parseOperand(Operands, false, false))
3714  return MatchOperand_NoMatch;
3715  }
3716 
3717  // Not all predicates are followed by a '/m' or '/z'.
3718  if (getTok().isNot(AsmToken::Slash))
3719  return MatchOperand_Success;
3720 
3721  // But when they do they shouldn't have an element type suffix.
3722  if (!Kind.empty()) {
3723  Error(S, "not expecting size suffix");
3724  return MatchOperand_ParseFail;
3725  }
3726 
3727  // Add a literal slash as an operand
3728  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
3729 
3730  Lex(); // Eat the slash.
3731 
3732  // Zeroing or merging?
3733  auto Pred = getTok().getString().lower();
3734  if (Pred != "z" && Pred != "m") {
3735  Error(getLoc(), "expecting 'm' or 'z' predication");
3736  return MatchOperand_ParseFail;
3737  }
3738 
3739  // Add zero/merge token.
3740  const char *ZM = Pred == "z" ? "z" : "m";
3741  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3742 
3743  Lex(); // Eat zero/merge token.
3744  return MatchOperand_Success;
3745 }
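// Illustrative examples of predicate spellings accepted or rejected above
// (not from the original source):
//   p0.b       // predicate with an element size suffix
//   p0/z       // zeroing qualifier, no suffix
//   p0/m       // merging qualifier, no suffix
//   p0.b/z     // error: not expecting size suffix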
3746 
3747 /// parseRegister - Parse a register operand.
3748 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3749  // Try for a Neon vector register.
3750  if (!tryParseNeonVectorRegister(Operands))
3751  return false;
3752 
3753  // Otherwise try for a scalar register.
3754  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3755  return false;
3756 
3757  return true;
3758 }
3759 
3760 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3761  bool HasELFModifier = false;
3762  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
3763 
3764  if (parseOptionalToken(AsmToken::Colon)) {
3765  HasELFModifier = true;
3766 
3767  if (getTok().isNot(AsmToken::Identifier))
3768  return TokError("expect relocation specifier in operand after ':'");
3769 
3770  std::string LowerCase = getTok().getIdentifier().lower();
3771  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3772  .Case("lo12", AArch64MCExpr::VK_LO12)
3773  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3774  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3775  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3776  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3777  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3778  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3779  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3780  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3781  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3782  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3783  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3784  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3785  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3786  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3787  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3788  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3789  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3790  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3791  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3792  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3793  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3794  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3795  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3796  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3797  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3798  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3799  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3800  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3801  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3802  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3803  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3804  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3805  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3806  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3807  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3808  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3809  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3810  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3811  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3812  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3813  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3814  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3815  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3816  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3817  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3818  .Default(AArch64MCExpr::VK_INVALID);
3819 
3820  if (RefKind == AArch64MCExpr::VK_INVALID)
3821  return TokError("expect relocation specifier in operand after ':'");
3822 
3823  Lex(); // Eat identifier
3824 
3825  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3826  return true;
3827  }
3828 
3829  if (getParser().parseExpression(ImmVal))
3830  return true;
3831 
3832  if (HasELFModifier)
3833  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3834 
3835  return false;
3836 }
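// Illustrative example (not from the original source): in
// "add x0, x0, :lo12:sym" the ":lo12:" prefix is parsed above, so the
// resulting immediate is an AArch64MCExpr with kind VK_LO12 wrapping the
// symbol reference to "sym".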
3837 
3838 OperandMatchResultTy
3839 AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
3840  if (getTok().isNot(AsmToken::LCurly))
3841  return MatchOperand_NoMatch;
3842 
3843  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
3844  StringRef Name = getTok().getString();
3845  size_t DotPosition = Name.find('.');
3846  if (DotPosition == StringRef::npos)
3847  return MatchOperand_NoMatch;
3848 
3849  unsigned RegNum = matchMatrixTileListRegName(Name);
3850  if (!RegNum)
3851  return MatchOperand_NoMatch;
3852 
3853  StringRef Tail = Name.drop_front(DotPosition);
3854  const Optional<std::pair<int, int>> &KindRes =
3855  parseVectorKind(Tail, RegKind::Matrix);
3856  if (!KindRes) {
3857  TokError("Expected the register to be followed by element width suffix");
3858  return MatchOperand_ParseFail;
3859  }
3860  ElementWidth = KindRes->second;
3861  Reg = RegNum;
3862  Lex(); // Eat the register.
3863  return MatchOperand_Success;
3864  };
3865 
3866  SMLoc S = getLoc();
3867  auto LCurly = getTok();
3868  Lex(); // Eat the left curly token.
3869 
3870  // Empty matrix list
3871  if (parseOptionalToken(AsmToken::RCurly)) {
3872  Operands.push_back(AArch64Operand::CreateMatrixTileList(
3873  /*RegMask=*/0, S, getLoc(), getContext()));
3874  return MatchOperand_Success;
3875  }
3876 
3877  // Try to parse the {za} alias early
3878  if (getTok().getString().equals_insensitive("za")) {
3879  Lex(); // Eat 'za'
3880 
3881  if (parseToken(AsmToken::RCurly, "'}' expected"))
3882  return MatchOperand_ParseFail;
3883 
3884  Operands.push_back(AArch64Operand::CreateMatrixTileList(
3885  /*RegMask=*/0xFF, S, getLoc(), getContext()));
3886  return MatchOperand_Success;
3887  }
3888 
3889  SMLoc TileLoc = getLoc();
3890 
3891  unsigned FirstReg, ElementWidth;
3892  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
3893  if (ParseRes != MatchOperand_Success) {
3894  getLexer().UnLex(LCurly);
3895  return ParseRes;
3896  }
3897 
3898  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3899 
3900  unsigned PrevReg = FirstReg;
3901  unsigned Count = 1;
3902 
3903  SmallSet<unsigned, 8> DRegs;
3904  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
3905 
3906  SmallSet<unsigned, 8> SeenRegs;
3907  SeenRegs.insert(FirstReg);
3908 
3909  while (parseOptionalToken(AsmToken::Comma)) {
3910  TileLoc = getLoc();
3911  unsigned Reg, NextElementWidth;
3912  ParseRes = ParseMatrixTile(Reg, NextElementWidth);
3913  if (ParseRes != MatchOperand_Success)
3914  return ParseRes;
3915 
3916  // Element size must match on all regs in the list.
3917  if (ElementWidth != NextElementWidth) {
3918  Error(TileLoc, "mismatched register size suffix");
3919  return MatchOperand_ParseFail;
3920  }
3921 
3922  if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
3923  Warning(TileLoc, "tile list not in ascending order");
3924 
3925  if (SeenRegs.contains(Reg))
3926  Warning(TileLoc, "duplicate tile in list");
3927  else {
3928  SeenRegs.insert(Reg);
3929  AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
3930  }
3931 
3932  PrevReg = Reg;
3933  ++Count;
3934  }
3935 
3936  if (parseToken(AsmToken::RCurly, "'}' expected"))
3937  return MatchOperand_ParseFail;
3938 
3939  unsigned RegMask = 0;
3940  for (auto Reg : DRegs)
3941  RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
3942  RI->getEncodingValue(AArch64::ZAD0));
3943  Operands.push_back(
3944  AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
3945 
3946  return MatchOperand_Success;
3947 }
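// Illustrative example (not from the original source): for "{za0.d, za1.d}"
// each 64-bit tile aliases exactly one ZAD register, so the accumulated mask
// is bit 0 | bit 1 = 0x3 relative to ZAD0.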
3948 
3949 template <RegKind VectorKind>
3950 OperandMatchResultTy
3951 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3952  bool ExpectMatch) {
3953  MCAsmParser &Parser = getParser();
3954  if (!getTok().is(AsmToken::LCurly))
3955  return MatchOperand_NoMatch;
3956 
3957  // Wrapper around parse function
3958  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3959  bool NoMatchIsError) {
3960  auto RegTok = getTok();
3961  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3962  if (ParseRes == MatchOperand_Success) {
3963  if (parseVectorKind(Kind, VectorKind))
3964  return ParseRes;
3965  llvm_unreachable("Expected a valid vector kind");
3966  }
3967 
3968  if (RegTok.isNot(AsmToken::Identifier) ||
3969  ParseRes == MatchOperand_ParseFail ||
3970  (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
3971  !RegTok.getString().startswith_insensitive("za"))) {
3972  Error(Loc, "vector register expected");
3973  return MatchOperand_ParseFail;
3974  }
3975 
3976  return MatchOperand_NoMatch;
3977  };
3978 
3979  SMLoc S = getLoc();
3980  auto LCurly = getTok();
3981  Lex(); // Eat the left curly token.
3982 
3983  StringRef Kind;
3984  unsigned FirstReg;
3985  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3986 
3987  // Put back the original left curly if there was no match, so that
3988  // different types of list operands can be matched (e.g. SVE, Neon).
3989  if (ParseRes == MatchOperand_NoMatch)
3990  Parser.getLexer().UnLex(LCurly);
3991 
3992  if (ParseRes != MatchOperand_Success)
3993  return ParseRes;
3994 
3995  int64_t PrevReg = FirstReg;
3996  unsigned Count = 1;
3997 
3998  if (parseOptionalToken(AsmToken::Minus)) {
3999  SMLoc Loc = getLoc();
4000  StringRef NextKind;
4001 
4002  unsigned Reg;
4003  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4004  if (ParseRes != MatchOperand_Success)
4005  return ParseRes;
4006 
4007  // Any kind suffixes must match on all regs in the list.
4008  if (Kind != NextKind) {
4009  Error(Loc, "mismatched register size suffix");
4010  return MatchOperand_ParseFail;
4011  }
4012 
4013  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
4014 
4015  if (Space == 0 || Space > 3) {
4016  Error(Loc, "invalid number of vectors");
4017  return MatchOperand_ParseFail;
4018  }
4019 
4020  Count += Space;
4021  }
4022  else {
4023  while (parseOptionalToken(AsmToken::Comma)) {
4024  SMLoc Loc = getLoc();
4025  StringRef NextKind;
4026  unsigned Reg;
4027  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4028  if (ParseRes != MatchOperand_Success)
4029  return ParseRes;
4030 
4031  // Any kind suffixes must match on all regs in the list.
4032  if (Kind != NextKind) {
4033  Error(Loc, "mismatched register size suffix");
4034  return MatchOperand_ParseFail;
4035  }
4036 
4037  // Registers must be incremental (with wraparound at 31)
4038  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
4039  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
4040  Error(Loc, "registers must be sequential");
4041  return MatchOperand_ParseFail;
4042  }
4043 
4044  PrevReg = Reg;
4045  ++Count;
4046  }
4047  }
4048 
4049  if (parseToken(AsmToken::RCurly, "'}' expected"))
4050  return MatchOperand_ParseFail;
4051 
4052  if (Count > 4) {
4053  Error(S, "invalid number of vectors");
4054  return MatchOperand_ParseFail;
4055  }
4056 
4057  unsigned NumElements = 0;
4058  unsigned ElementWidth = 0;
4059  if (!Kind.empty()) {
4060  if (const auto &VK = parseVectorKind(Kind, VectorKind))
4061  std::tie(NumElements, ElementWidth) = *VK;
4062  }
4063 
4064  Operands.push_back(AArch64Operand::CreateVectorList(
4065  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
4066  getContext()));
4067 
4068  return MatchOperand_Success;
4069 }
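// Illustrative examples (not from the original source): both spellings below
// denote the same four-register Neon list and become a single vector-list
// operand; lists wrap at 31, so "{ v31.4s, v0.4s }" is also sequential:
//   { v0.8b, v1.8b, v2.8b, v3.8b }
//   { v0.8b - v3.8b }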
4070 
4071 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4072 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4073  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4074  if (ParseRes != MatchOperand_Success)
4075  return true;
4076 
4077  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4078 }
4079 
4080 OperandMatchResultTy
4081 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4082  SMLoc StartLoc = getLoc();
4083 
4084  unsigned RegNum;
4085  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4086  if (Res != MatchOperand_Success)
4087  return Res;
4088 
4089  if (!parseOptionalToken(AsmToken::Comma)) {
4090  Operands.push_back(AArch64Operand::CreateReg(
4091  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4092  return MatchOperand_Success;
4093  }
4094 
4095  parseOptionalToken(AsmToken::Hash);
4096 
4097  if (getTok().isNot(AsmToken::Integer)) {
4098  Error(getLoc(), "index must be absent or #0");
4099  return MatchOperand_ParseFail;
4100  }
4101 
4102  const MCExpr *ImmVal;
4103  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4104  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4105  Error(getLoc(), "index must be absent or #0");
4106  return MatchOperand_ParseFail;
4107  }
4108 
4109  Operands.push_back(AArch64Operand::CreateReg(
4110  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4111  return MatchOperand_Success;
4112 }
4113 
4114 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4115 OperandMatchResultTy
4116 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4117  SMLoc StartLoc = getLoc();
4118 
4119  unsigned RegNum;
4120  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4121  if (Res != MatchOperand_Success)
4122  return Res;
4123 
4124  // No shift/extend is the default.
4125  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4126  Operands.push_back(AArch64Operand::CreateReg(
4127  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4128  return MatchOperand_Success;
4129  }
4130 
4131  // Eat the comma
4132  Lex();
4133 
4134  // Match the shift
4135  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4136  Res = tryParseOptionalShiftExtend(ExtOpnd);
4137  if (Res != MatchOperand_Success)
4138  return Res;
4139 
4140  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4141  Operands.push_back(AArch64Operand::CreateReg(
4142  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4143  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4144  Ext->hasShiftExtendAmount()));
4145 
4146  return MatchOperand_Success;
4147 }
4148 
4149 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4150  MCAsmParser &Parser = getParser();
4151 
4152  // Some SVE instructions have a decoration after the immediate, e.g.
4153  // "mul vl". We parse them here and add tokens, which must be present in
4154  // the asm string of the TableGen instruction definition.
4155  bool NextIsVL =
4156  Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4157  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4158  if (!getTok().getString().equals_insensitive("mul") ||
4159  !(NextIsVL || NextIsHash))
4160  return true;
4161 
4162  Operands.push_back(
4163  AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4164  Lex(); // Eat the "mul"
4165 
4166  if (NextIsVL) {
4167  Operands.push_back(
4168  AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4169  Lex(); // Eat the "vl"
4170  return false;
4171  }
4172 
4173  if (NextIsHash) {
4174  Lex(); // Eat the #
4175  SMLoc S = getLoc();
4176 
4177  // Parse immediate operand.
4178  const MCExpr *ImmVal;
4179  if (!Parser.parseExpression(ImmVal))
4180  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4181  Operands.push_back(AArch64Operand::CreateImm(
4182  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4183  getContext()));
4184  return false;
4185  }
4186  }
4187 
4188  return Error(getLoc(), "expected 'vl' or '#<imm>'");
4189 }
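// Illustrative example (not from the original source): in
// "ld1b { z0.b }, p0/z, [x0, #1, mul vl]" the trailing "mul vl" decoration
// is consumed here as the two literal tokens "mul" and "vl".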
4190 
4191 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4192  auto Tok = getTok();
4193  if (Tok.isNot(AsmToken::Identifier))
4194  return true;
4195 
4196  auto Keyword = Tok.getString();
4197  Keyword = StringSwitch<StringRef>(Keyword.lower())
4198  .Case("sm", "sm")
4199  .Case("za", "za")
4200  .Default(Keyword);
4201  Operands.push_back(
4202  AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4203 
4204  Lex();
4205  return false;
4206 }
4207 
4208 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
4209 /// the operand regardless of the mnemonic.
4210 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4211  bool invertCondCode) {
4212  MCAsmParser &Parser = getParser();
4213 
4214  OperandMatchResultTy ResTy =
4215  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4216 
4217  // Check if the current operand has a custom associated parser, if so, try to
4218  // custom parse the operand, or fallback to the general approach.
4219  if (ResTy == MatchOperand_Success)
4220  return false;
4221  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4222  // there was a match, but an error occurred, in which case, just return that
4223  // the operand parsing failed.
4224  if (ResTy == MatchOperand_ParseFail)
4225  return true;
4226 
4227  // Nothing custom, so do general case parsing.
4228  SMLoc S, E;
4229  switch (getLexer().getKind()) {
4230  default: {
4231  SMLoc S = getLoc();
4232  const MCExpr *Expr;
4233  if (parseSymbolicImmVal(Expr))
4234  return Error(S, "invalid operand");
4235 
4236  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4237  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4238  return false;
4239  }
4240  case AsmToken::LBrac: {
4241  Operands.push_back(
4242  AArch64Operand::CreateToken("[", getLoc(), getContext()));
4243  Lex(); // Eat '['
4244 
4245  // There's no comma after a '[', so we can parse the next operand
4246  // immediately.
4247  return parseOperand(Operands, false, false);
4248  }
4249  case AsmToken::LCurly: {
4250  if (!parseNeonVectorList(Operands))
4251  return false;
4252 
4253  Operands.push_back(
4254  AArch64Operand::CreateToken("{", getLoc(), getContext()));
4255  Lex(); // Eat '{'
4256 
4257  // There's no comma after a '{', so we can parse the next operand
4258  // immediately.
4259  return parseOperand(Operands, false, false);
4260  }
4261  case AsmToken::Identifier: {
4262  // If we're expecting a Condition Code operand, then just parse that.
4263  if (isCondCode)
4264  return parseCondCode(Operands, invertCondCode);
4265 
4266  // If it's a register name, parse it.
4267  if (!parseRegister(Operands))
4268  return false;
4269 
4270  // See if this is a "mul vl" decoration or "mul #<int>" operand used
4271  // by SVE instructions.
4272  if (!parseOptionalMulOperand(Operands))
4273  return false;
4274 
4275  // If this is an "smstart" or "smstop" instruction, parse its special
4276  // keyword operand as an identifier.
4277  if (Mnemonic == "smstart" || Mnemonic == "smstop")
4278  return parseKeywordOperand(Operands);
4279 
4280  // This could be an optional "shift" or "extend" operand.
4281  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4282  // We can only continue if no tokens were eaten.
4283  if (GotShift != MatchOperand_NoMatch)
4284  return GotShift;
4285 
4286  // If this is a two-word mnemonic, parse its special keyword
4287  // operand as an identifier.
4288  if (Mnemonic == "brb")
4289  return parseKeywordOperand(Operands);
4290 
4291  // This was not a register so parse other operands that start with an
4292  // identifier (like labels) as expressions and create them as immediates.
4293  const MCExpr *IdVal;
4294  S = getLoc();
4295  if (getParser().parseExpression(IdVal))
4296  return true;
4297  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4298  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4299  return false;
4300  }
4301  case AsmToken::Integer:
4302  case AsmToken::Real:
4303  case AsmToken::Hash: {
4304  // #42 -> immediate.
4305  S = getLoc();
4306 
4307  parseOptionalToken(AsmToken::Hash);
4308 
4309  // Parse a negative sign
4310  bool isNegative = false;
4311  if (getTok().is(AsmToken::Minus)) {
4312  isNegative = true;
4313  // We need to consume this token only when we have a Real, otherwise
4314  // we let parseSymbolicImmVal take care of it
4315  if (Parser.getLexer().peekTok().is(AsmToken::Real))
4316  Lex();
4317  }
4318 
4319  // The only Real that should come through here is a literal #0.0 for
4320  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4321  // so convert the value.
4322  const AsmToken &Tok = getTok();
4323  if (Tok.is(AsmToken::Real)) {
4324  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4325  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4326  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4327  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4328  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4329  return TokError("unexpected floating point literal");
4330  else if (IntVal != 0 || isNegative)
4331  return TokError("expected floating-point constant #0.0");
4332  Lex(); // Eat the token.
4333 
4334  Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4335  Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4336  return false;
4337  }
4338 
4339  const MCExpr *ImmVal;
4340  if (parseSymbolicImmVal(ImmVal))
4341  return true;
4342 
4343  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4344  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4345  return false;
4346  }
4347  case AsmToken::Equal: {
4348  SMLoc Loc = getLoc();
4349  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
4350  return TokError("unexpected token in operand");
4351  Lex(); // Eat '='
4352  const MCExpr *SubExprVal;
4353  if (getParser().parseExpression(SubExprVal))
4354  return true;
4355 
4356  if (Operands.size() < 2 ||
4357  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4358  return Error(Loc, "Only valid when first operand is register");
4359 
4360  bool IsXReg =
4361  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4362  Operands[1]->getReg());
4363 
4364  MCContext& Ctx = getContext();
4365  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4366  // If the op is an imm and can fit into a mov, then replace ldr with mov.
4367  if (isa<MCConstantExpr>(SubExprVal)) {
4368  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4369  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4370  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
4371  ShiftAmt += 16;
4372  Imm >>= 16;
4373  }
4374  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4375  Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4376  Operands.push_back(AArch64Operand::CreateImm(
4377  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4378  if (ShiftAmt)
4379  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4380  ShiftAmt, true, S, E, Ctx));
4381  return false;
4382  }
4383  APInt Simm = APInt(64, Imm << ShiftAmt);
4384  // check if the immediate is an unsigned or signed 32-bit int for W regs
4385  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4386  return Error(Loc, "Immediate too large for register");
4387  }
4388  // If it is a label or an imm that cannot fit in a movz, put it into the constant pool.
4389  const MCExpr *CPLoc =
4390  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4391  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4392  return false;
4393  }
4394  }
4395 }
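// Illustrative example of the "ldr reg, =value" pseudo handled above (not
// from the original source): "ldr x0, =0x10000" has Imm = 0x10000, which
// reduces to 1 with ShiftAmt = 16, so it is rewritten as
// "movz x0, #1, lsl #16"; a label or an immediate that cannot be encoded as
// a move is placed in the constant pool instead and the ldr is kept.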
4396 
4397 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4398  const MCExpr *Expr = nullptr;
4399  SMLoc L = getLoc();
4400  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4401  return true;
4402  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4403  if (check(!Value, L, "expected constant expression"))
4404  return true;
4405  Out = Value->getValue();
4406  return false;
4407 }
4408 
4409 bool AArch64AsmParser::parseComma() {
4410  if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4411  return true;
4412  // Eat the comma
4413  Lex();
4414  return false;
4415 }
4416 
4417 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4418  unsigned First, unsigned Last) {
4419  unsigned Reg;
4420  SMLoc Start, End;
4421  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
4422  return true;
4423 
4424  // Special handling for FP and LR; they aren't linearly after x28 in
4425  // the registers enum.
4426  unsigned RangeEnd = Last;
4427  if (Base == AArch64::X0) {
4428  if (Last == AArch64::FP) {
4429  RangeEnd = AArch64::X28;
4430  if (Reg == AArch64::FP) {
4431  Out = 29;
4432  return false;
4433  }
4434  }
4435  if (Last == AArch64::LR) {
4436  RangeEnd = AArch64::X28;
4437  if (Reg == AArch64::FP) {
4438  Out = 29;
4439  return false;
4440  } else if (Reg == AArch64::LR) {
4441  Out = 30;
4442  return false;
4443  }
4444  }
4445  }
4446 
4447  if (check(Reg < First || Reg > RangeEnd, Start,
4448  Twine("expected register in range ") +
4449  AArch64InstPrinter::getRegisterName(First) + " to " +
4450  AArch64InstPrinter::getRegisterName(Last)))
4451  return true;
4452  Out = Reg - Base;
4453  return false;
4454 }
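// Illustrative example (not from the original source): with Base = X0 and a
// range ending at LR, parsing "fp" yields Out = 29 and "lr" yields Out = 30,
// matching their x29/x30 aliases even though FP and LR are not laid out
// after X28 in the register enum.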
4455 
4456 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4457  const MCParsedAsmOperand &Op2) const {
4458  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4459  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4460  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4461  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4462  return MCTargetAsmParser::regsEqual(Op1, Op2);
4463 
4464  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4465  "Testing equality of non-scalar registers not supported");
4466 
4467  // Check if the registers match their sub/super register classes.
4468  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4469  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4470  if (AOp1.getRegEqualityTy() == EqualsSubReg)
4471  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4472  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4473  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4474  if (AOp2.getRegEqualityTy() == EqualsSubReg)
4475  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4476 
4477  return false;
4478 }
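// Illustrative example (not from the original source): with an
// EqualsSuperReg constraint on the first operand, regsEqual treats w0 and x0
// as equal, since getXRegFromWReg(W0) yields X0.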
4479 
4480 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
4481 /// operands.
4482 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
4483  StringRef Name, SMLoc NameLoc,
4484  OperandVector &Operands) {
4485  Name = StringSwitch<StringRef>(Name.lower())
4486  .Case("beq", "b.eq")
4487  .Case("bne", "b.ne")
4488  .Case("bhs", "b.hs")
4489  .Case("bcs", "b.cs")
4490  .Case("blo", "b.lo")
4491  .Case("bcc", "b.cc")
4492  .Case("bmi", "b.mi")
4493  .Case("bpl", "b.pl")
4494  .Case("bvs", "b.vs")
4495  .Case("bvc", "b.vc")
4496  .Case("bhi", "b.hi")
4497  .Case("bls", "b.ls")
4498  .Case("bge", "b.ge")
4499  .Case("blt", "b.lt")
4500  .Case("bgt", "b.gt")
4501  .Case("ble", "b.le")
4502  .Case("bal", "b.al")
4503  .Case("bnv", "b.nv")
4504  .Default(Name);
4505 
4506  // First check for the AArch64-specific .req directive.
4507  if (getTok().is(AsmToken::Identifier) &&
4508  getTok().getIdentifier().lower() == ".req") {
4509  parseDirectiveReq(Name, NameLoc);
4510  // We always return 'error' for this, as we're done with this
4511  // statement and don't need to match the instruction.
4512  return true;
4513  }
4514 
4515  // Create the leading tokens for the mnemonic, split by '.' characters.
4516  size_t Start = 0, Next = Name.find('.');
4517  StringRef Head = Name.slice(Start, Next);
4518 
4519  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4520  // the SYS instruction.
4521  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4522  Head == "cfp" || Head == "dvp" || Head == "cpp")
4523  return parseSysAlias(Head, NameLoc, Operands);
4524 
4525  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4526  Mnemonic = Head;
4527 
4528  // Handle condition codes for a branch mnemonic
4529  if (Head == "b" && Next != StringRef::npos) {
4530  Start = Next;
4531  Next = Name.find('.', Start + 1);
4532  Head = Name.slice(Start + 1, Next);
4533 
4534  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4535  (Head.data() - Name.data()));
4536  AArch64CC::CondCode CC = parseCondCodeString(Head);
4537  if (CC == AArch64CC::Invalid)
4538  return Error(SuffixLoc, "invalid condition code");
4539  Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
4540  /*IsSuffix=*/true));
4541  Operands.push_back(
4542  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4543  }
4544 
4545  // Add the remaining tokens in the mnemonic.
4546  while (Next != StringRef::npos) {
4547  Start = Next;
4548  Next = Name.find('.', Start + 1);
4549  Head = Name.slice(Start, Next);
4550  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4551  (Head.data() - Name.data()) + 1);
4552  Operands.push_back(AArch64Operand::CreateToken(
4553  Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
4554  }
4555 
4556  // Conditional compare instructions have a Condition Code operand, which needs
4557  // to be parsed and an immediate operand created.
4558  bool condCodeFourthOperand =
4559  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4560  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4561  Head == "csinc" || Head == "csinv" || Head == "csneg");
4562 
4563  // These instructions are aliases to some of the conditional select
4564  // instructions. However, the condition code is inverted in the aliased
4565  // instruction.
4566  //
4567  // FIXME: Is this the correct way to handle these? Or should the parser
4568  // generate the aliased instructions directly?
4569  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4570  bool condCodeThirdOperand =
4571  (Head == "cinc" || Head == "cinv" || Head == "cneg");
4572 
4573  // Read the remaining operands.
4574  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4575 
4576  unsigned N = 1;
4577  do {
4578  // Parse and remember the operand.
4579  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4580  (N == 3 && condCodeThirdOperand) ||
4581  (N == 2 && condCodeSecondOperand),
4582  condCodeSecondOperand || condCodeThirdOperand)) {
4583  return true;
4584  }
4585 
4586  // After successfully parsing some operands there are three special cases
4587  // to consider (i.e. notional operands not separated by commas). Two are
4588  // due to memory specifiers:
4589  // + An RBrac will end an address for load/store/prefetch
4590  // + An '!' will indicate a pre-indexed operation.
4591  //
4592  // And a further case is '}', which ends a group of tokens specifying the
4593  // SME accumulator array 'ZA' or tile vector, i.e.
4594  //
4595  // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
4596  //
4597  // It's someone else's responsibility to make sure these tokens are sane
4598  // in the given context!
4599 
4600  if (parseOptionalToken(AsmToken::RBrac))
4601  Operands.push_back(
4602  AArch64Operand::CreateToken("]", getLoc(), getContext()));
4603  if (parseOptionalToken(AsmToken::Exclaim))
4604  Operands.push_back(
4605  AArch64Operand::CreateToken("!", getLoc(), getContext()));
4606  if (parseOptionalToken(AsmToken::RCurly))
4607  Operands.push_back(
4608  AArch64Operand::CreateToken("}", getLoc(), getContext()));
4609 
4610  ++N;
4611  } while (parseOptionalToken(AsmToken::Comma));
4612  }
4613 
4614  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4615  return true;
4616 
4617  return false;
4618 }
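// Illustrative example (not from the original source): "beq lbl" is
// canonicalized to "b.eq lbl" above, then split into the "b" token, a "."
// suffix token and a condition-code operand for EQ before the label is
// parsed as an expression.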
4619 
4620 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4621  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4622  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4623  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4624  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4625  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4626  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4627  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4628 }
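// Illustrative example (not from the original source):
// isMatchingOrAlias(AArch64::Z3, AArch64::Q3) returns true, since q3
// overlaps the low 128 bits of z3; any of b3, h3, s3, d3 or q3 is treated as
// an alias of Z3 here.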
4629 
4630 // FIXME: This entire function is a giant hack to provide us with decent
4631 // operand range validation/diagnostics until TableGen/MC can be extended
4632 // to support autogeneration of this kind of validation.
4633 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4634  SmallVectorImpl<SMLoc> &Loc) {
4635  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4636  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4637 
4638  // A prefix only applies to the instruction following it. Here we extract
4639  // prefix information for the next instruction before validating the current
4640  // one so that in the case of failure we don't erroneously continue using the
4641  // current prefix.
4642  PrefixInfo Prefix = NextPrefix;
4643  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4644 
4645  // Before validating the instruction in isolation we run through the rules
4646  // applicable when it follows a prefix instruction.
4647  // NOTE: brk & hlt can be prefixed but require no additional validation.
4648  if (Prefix.isActive() &&
4649  (Inst.getOpcode() != AArch64::BRK) &&
4650  (Inst.getOpcode() != AArch64::HLT)) {
4651 
4652  // Prefixed instructions must have a destructive operand.
4653  if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4654  AArch64::NotDestructive)
4655  return Error(IDLoc, "instruction is unpredictable when following a"
4656  " movprfx, suggest replacing movprfx with mov");
4657 
4658  // Destination operands must match.
4659  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4660  return Error(Loc[0], "instruction is unpredictable when following a"
4661  " movprfx writing to a different destination");
4662 
4663  // Destination operand must not be used in any other location.
4664  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4665  if (Inst.getOperand(i).isReg() &&
4666  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4667  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4668  return Error(Loc[0], "instruction is unpredictable when following a"
4669  " movprfx and destination also used as non-destructive"
4670  " source");
4671  }
4672 
4673  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4674  if (Prefix.isPredicated()) {
4675  int PgIdx = -1;
4676 
4677  // Find the instruction's general predicate.
4678  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4679  if (Inst.getOperand(i).isReg() &&
4680  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4681  PgIdx = i;
4682  break;
4683  }
4684 
4685  // Instruction must be predicated if the movprfx is predicated.
4686  if (PgIdx == -1 ||
4687  (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4688  return Error(IDLoc, "instruction is unpredictable when following a"
4689  " predicated movprfx, suggest using unpredicated movprfx");
4690 
4691  // Instruction must use same general predicate as the movprfx.
4692  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4693  return Error(IDLoc, "instruction is unpredictable when following a"
4694  " predicated movprfx using a different general predicate");
4695 
4696  // Instruction element type must match the movprfx.
4697  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4698  return Error(IDLoc, "instruction is unpredictable when following a"
4699  " predicated movprfx with a different element size");
4700  }
4701  }
4702 
4703  // Check for indexed addressing modes where the base register is the
4704  // same as a destination/source register, or for a pair load where
4705  // Rt == Rt2. All of those are undefined behaviour.
4706  switch (Inst.getOpcode()) {
4707  case AArch64::LDPSWpre:
4708  case AArch64::LDPWpost:
4709  case AArch64::LDPWpre:
4710  case AArch64::LDPXpost:
4711  case AArch64::LDPXpre: {
4712  unsigned Rt = Inst.getOperand(1).getReg();
4713  unsigned Rt2 = Inst.getOperand(2).getReg();
4714  unsigned Rn = Inst.getOperand(3).getReg();
4715  if (RI->isSubRegisterEq(Rn, Rt))
4716  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4717  "is also a destination");
4718  if (RI->isSubRegisterEq(Rn, Rt2))
4719  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4720  "is also a destination");
4721  LLVM_FALLTHROUGH;
4722  }
4723  case AArch64::LDPDi:
4724  case AArch64::LDPQi:
4725  case AArch64::LDPSi:
4726  case AArch64::LDPSWi:
4727  case AArch64::LDPWi:
4728  case AArch64::LDPXi: {
4729  unsigned Rt = Inst.getOperand(0).getReg();
4730  unsigned Rt2 = Inst.getOperand(1).getReg();
4731  if (Rt == Rt2)
4732  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4733  break;
4734  }
4735  case AArch64::LDPDpost:
4736  case AArch64::LDPDpre:
4737  case AArch64::LDPQpost:
4738  case AArch64::LDPQpre:
4739  case AArch64::LDPSpost:
4740  case AArch64::LDPSpre:
4741  case AArch64::LDPSWpost: {
4742  unsigned Rt = Inst.getOperand(1).getReg();
4743  unsigned Rt2 = Inst.getOperand(2).getReg();
4744  if (Rt == Rt2)
4745  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4746  break;
4747  }
4748  case AArch64::STPDpost:
4749  case AArch64::STPDpre:
4750  case AArch64::STPQpost:
4751  case AArch64::STPQpre:
4752  case AArch64::STPSpost:
4753  case AArch64::STPSpre:
4754  case AArch64::STPWpost:
4755  case AArch64::STPWpre:
4756  case AArch64::STPXpost:
4757  case AArch64::STPXpre: {
4758  unsigned Rt = Inst.getOperand(1).getReg();
4759  unsigned Rt2 = Inst.getOperand(2).getReg();
4760  unsigned Rn = Inst.getOperand(3).getReg();
4761  if (RI->isSubRegisterEq(Rn, Rt))
4762  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4763  "is also a source");
4764  if (RI->isSubRegisterEq(Rn, Rt2))
4765  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4766  "is also a source");
4767  break;
4768  }
4769  case AArch64::LDRBBpre:
4770  case AArch64::LDRBpre:
4771  case AArch64::LDRHHpre:
4772  case AArch64::LDRHpre:
4773  case AArch64::LDRSBWpre:
4774  case AArch64::LDRSBXpre:
4775  case AArch64::LDRSHWpre:
4776  case AArch64::LDRSHXpre:
4777  case AArch64::LDRSWpre:
4778  case AArch64::LDRWpre:
4779  case AArch64::LDRXpre:
4780  case AArch64::LDRBBpost:
4781  case AArch64::LDRBpost:
4782  case AArch64::LDRHHpost:
4783  case AArch64::LDRHpost:
4784  case AArch64::LDRSBWpost:
4785  case AArch64::LDRSBXpost:
4786  case AArch64::LDRSHWpost:
4787  case AArch64::LDRSHXpost:
4788  case AArch64::LDRSWpost:
4789  case AArch64::LDRWpost:
4790  case AArch64::LDRXpost: {
4791  unsigned Rt = Inst.getOperand(1).getReg();
4792  unsigned Rn = Inst.getOperand(2).getReg();
4793  if (RI->isSubRegisterEq(Rn, Rt))
4794  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4795  "is also a source");
4796  break;
4797  }
4798  case AArch64::STRBBpost:
4799  case AArch64::STRBpost:
4800  case AArch64::STRHHpost:
4801  case AArch64::STRHpost:
4802  case AArch64::STRWpost:
4803  case AArch64::STRXpost:
4804  case AArch64::STRBBpre:
4805  case AArch64::STRBpre:
4806  case AArch64::STRHHpre:
4807  case AArch64::STRHpre:
4808  case AArch64::STRWpre:
4809  case AArch64::STRXpre: {
4810  unsigned Rt = Inst.getOperand(1).getReg();
4811  unsigned Rn = Inst.getOperand(2).getReg();
4812  if (RI->isSubRegisterEq(Rn, Rt))
4813  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4814  "is also a source");
4815  break;
4816  }
4817  case AArch64::STXRB:
4818  case AArch64::STXRH:
4819  case AArch64::STXRW:
4820  case AArch64::STXRX:
4821  case AArch64::STLXRB:
4822  case AArch64::STLXRH:
4823  case AArch64::STLXRW:
4824  case AArch64::STLXRX: {
4825  unsigned Rs = Inst.getOperand(0).getReg();
4826  unsigned Rt = Inst.getOperand(1).getReg();
4827  unsigned Rn = Inst.getOperand(2).getReg();
4828  if (RI->isSubRegisterEq(Rt, Rs) ||
4829  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4830  return Error(Loc[0],
4831  "unpredictable STXR instruction, status is also a source");
4832  break;
4833  }
4834  case AArch64::STXPW:
4835  case AArch64::STXPX:
4836  case AArch64::STLXPW:
4837  case AArch64::STLXPX: {
4838  unsigned Rs = Inst.getOperand(0).getReg();
4839  unsigned Rt1 = Inst.getOperand(1).getReg();
4840  unsigned Rt2 = Inst.getOperand(2).getReg();
4841  unsigned Rn = Inst.getOperand(3).getReg();
4842  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4843  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4844  return Error(Loc[0],
4845  "unpredictable STXP instruction, status is also a source");
4846  break;
4847  }
4848  case AArch64::LDRABwriteback:
4849  case AArch64::LDRAAwriteback: {
4850  unsigned Xt = Inst.getOperand(0).getReg();
4851  unsigned Xn = Inst.getOperand(1).getReg();
4852  if (Xt == Xn)
4853  return Error(Loc[0],
4854  "unpredictable LDRA instruction, writeback base"
4855  " is also a destination");
4856  break;
4857  }
4858  }
4859 
4860 
4861  // Now check immediate ranges. Separate from the above as there is overlap
4862  // in the instructions being checked and this keeps the nested conditionals
4863  // to a minimum.
4864  switch (Inst.getOpcode()) {
4865  case AArch64::ADDSWri:
4866  case AArch64::ADDSXri:
4867  case AArch64::ADDWri:
4868  case AArch64::ADDXri:
4869  case AArch64::SUBSWri:
4870  case AArch64::SUBSXri:
4871  case AArch64::SUBWri:
4872  case AArch64::SUBXri: {
4873  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4874  // some slight duplication here.
4875  if (Inst.getOperand(2).isExpr()) {
4876  const MCExpr *Expr = Inst.getOperand(2).getExpr();
4877  AArch64MCExpr::VariantKind ELFRefKind;
4878  MCSymbolRefExpr::VariantKind DarwinRefKind;
4879  int64_t Addend;
4880  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4881 
4882  // Only allow these with ADDXri.
4883  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4884  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4885  Inst.getOpcode() == AArch64::ADDXri)
4886  return false;
4887 
4888  // Only allow these with ADDXri/ADDWri
4889  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4890  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4891  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4892  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4893  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4894  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4895  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4896  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4897  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4898  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4899  (Inst.getOpcode() == AArch64::ADDXri ||
4900  Inst.getOpcode() == AArch64::ADDWri))
4901  return false;
4902 
4903  // Otherwise, don't allow symbol refs in the immediate field.
4904  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4905  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4906  // 'cmp w0, borked')
4907  return Error(Loc.back(), "invalid immediate expression");
4908  }
4909  // We don't validate more complex expressions here
4910  }
4911  return false;
4912  }
4913  default:
4914  return false;
4915  }
4916 }
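// Illustrative examples of diagnostics produced by the checks above (not
// from the original source):
//   ldp x0, x0, [x1]    // unpredictable LDP instruction, Rt2==Rt
//   ldr x0, [x0], #8    // unpredictable LDR instruction, writeback base
//                       // is also a source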
4917 
4918 static std::string AArch64MnemonicSpellCheck(StringRef S,
4919  const FeatureBitset &FBS,
4920  unsigned VariantID = 0);
4921 
4922 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4923  uint64_t ErrorInfo,
4924  OperandVector &Operands) {
4925  switch (ErrCode) {
4926  case Match_InvalidTiedOperand: {
4927  RegConstraintEqualityTy EqTy =
4928  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4929  .getRegEqualityTy();
4930  switch (EqTy) {
4931  case RegConstraintEqualityTy::EqualsSubReg:
4932  return Error(Loc, "operand must be 64-bit form of destination register");
4933  case RegConstraintEqualityTy::EqualsSuperReg:
4934  return Error(Loc, "operand must be 32-bit form of destination register");
4935  case RegConstraintEqualityTy::EqualsReg:
4936  return Error(Loc, "operand must match destination register");
4937  }
4938  llvm_unreachable("Unknown RegConstraintEqualityTy");
4939  }
4940  case Match_MissingFeature:
4941  return Error(Loc,
4942  "instruction requires a CPU feature not currently enabled");
4943  case Match_InvalidOperand:
4944  return Error(Loc, "invalid operand for instruction");
4945  case Match_InvalidSuffix:
4946  return Error(Loc, "invalid type suffix for instruction");
4947  case Match_InvalidCondCode:
4948  return Error(Loc, "expected AArch64 condition code");
4949  case Match_AddSubRegExtendSmall:
4950  return Error(Loc,
4951  "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4952  case Match_AddSubRegExtendLarge:
4953  return Error(Loc,
4954  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4955  case Match_AddSubSecondSource:
4956  return Error(Loc,
4957  "expected compatible register, symbol or integer in range [0, 4095]");
4958  case Match_LogicalSecondSource:
4959  return Error(Loc, "expected compatible register or logical immediate");
4960  case Match_InvalidMovImm32Shift:
4961  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4962  case Match_InvalidMovImm64Shift:
4963  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4964  case Match_AddSubRegShift32:
4965  return Error(Loc,
4966  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4967  case Match_AddSubRegShift64:
4968  return Error(Loc,
4969  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4970  case Match_InvalidFPImm:
4971  return Error(Loc,
4972  "expected compatible register or floating-point constant");
4973  case Match_InvalidMemoryIndexedSImm6:
4974  return Error(Loc, "index must be an integer in range [-32, 31].");
4975  case Match_InvalidMemoryIndexedSImm5:
4976  return Error(Loc, "index must be an integer in range [-16, 15].");
4977  case Match_InvalidMemoryIndexed1SImm4:
4978  return Error(Loc, "index must be an integer in range [-8, 7].");
4979  case Match_InvalidMemoryIndexed2SImm4:
4980  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4981  case Match_InvalidMemoryIndexed3SImm4:
4982  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4983  case Match_InvalidMemoryIndexed4SImm4:
4984  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4985  case Match_InvalidMemoryIndexed16SImm4:
4986  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4987  case Match_InvalidMemoryIndexed32SImm4:
4988  return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4989  case Match_InvalidMemoryIndexed1SImm6:
4990  return Error(Loc, "index must be an integer in range [-32, 31].");
4991  case Match_InvalidMemoryIndexedSImm8:
4992  return Error(Loc, "index must be an integer in range [-128, 127].");
4993  case Match_InvalidMemoryIndexedSImm9:
4994  return Error(Loc, "index must be an integer in range [-256, 255].");
4995  case Match_InvalidMemoryIndexed16SImm9:
4996  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4997  case Match_InvalidMemoryIndexed8SImm10:
4998  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4999  case Match_InvalidMemoryIndexed4SImm7:
5000  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5001  case Match_InvalidMemoryIndexed8SImm7:
5002  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5003  case Match_InvalidMemoryIndexed16SImm7:
5004  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5005  case Match_InvalidMemoryIndexed8UImm5:
5006  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5007  case Match_InvalidMemoryIndexed4UImm5:
5008  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5009  case Match_InvalidMemoryIndexed2UImm5:
5010  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5011  case Match_InvalidMemoryIndexed8UImm6:
5012  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5013  case Match_InvalidMemoryIndexed16UImm6:
5014  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5015  case Match_InvalidMemoryIndexed4UImm6:
5016  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5017  case Match_InvalidMemoryIndexed2UImm6:
5018  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5019  case Match_InvalidMemoryIndexed1UImm6:
5020  return Error(Loc, "index must be an integer in range [0, 63].");
5021  case Match_InvalidMemoryWExtend8:
5022  return Error(Loc,
5023  "expected 'uxtw' or 'sxtw' with optional shift of #0");
5024  case Match_InvalidMemoryWExtend16:
5025  return Error(Loc,
5026  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5027  case Match_InvalidMemoryWExtend32:
5028  return Error(Loc,
5029  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5030  case Match_InvalidMemoryWExtend64:
5031  return Error(Loc,
5032  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5033  case Match_InvalidMemoryWExtend128:
5034  return Error(Loc,
5035  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5036  case Match_InvalidMemoryXExtend8:
5037  return Error(Loc,
5038  "expected 'lsl' or 'sxtx' with optional shift of #0");
5039  case Match_InvalidMemoryXExtend16:
5040  return Error(Loc,
5041  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5042  case Match_InvalidMemoryXExtend32:
5043  return Error(Loc,
5044  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5045  case Match_InvalidMemoryXExtend64:
5046  return Error(Loc,
5047  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5048  case Match_InvalidMemoryXExtend128:
5049  return Error(Loc,
5050  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5051  case Match_InvalidMemoryIndexed1:
5052  return Error(Loc, "index must be an integer in range [0, 4095].");
5053  case Match_InvalidMemoryIndexed2:
5054  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5055  case Match_InvalidMemoryIndexed4:
5056  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5057  case Match_InvalidMemoryIndexed8:
5058  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5059  case Match_InvalidMemoryIndexed16:
5060  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5061  case Match_InvalidImm0_1:
5062  return Error(Loc, "immediate must be an integer in range [0, 1].");
5063  case Match_InvalidImm0_3:
5064  return Error(Loc, "immediate must be an integer in range [0, 3].");
5065  case Match_InvalidImm0_7:
5066  return Error(Loc, "immediate must be an integer in range [0, 7].");
5067  case Match_InvalidImm0_15:
5068  return Error(Loc, "immediate must be an integer in range [0, 15].");
5069  case Match_InvalidImm0_31:
5070  return Error(Loc, "immediate must be an integer in range [0, 31].");
5071  case Match_InvalidImm0_63:
5072  return Error(Loc, "immediate must be an integer in range [0, 63].");
5073  case Match_InvalidImm0_127:
5074  return Error(Loc, "immediate must be an integer in range [0, 127].");
5075  case Match_InvalidImm0_255:
5076  return Error(Loc, "immediate must be an integer in range [0, 255].");
5077  case Match_InvalidImm0_65535:
5078  return Error(Loc, "immediate must be an integer in range [0, 65535].");
5079  case Match_InvalidImm1_8:
5080  return Error(Loc, "immediate must be an integer in range [1, 8].");
5081  case Match_InvalidImm1_16:
5082  return Error(Loc, "immediate must be an integer in range [1, 16].");
5083  case Match_InvalidImm1_32:
5084  return Error(Loc, "immediate must be an integer in range [1, 32].");
5085  case Match_InvalidImm1_64:
5086  return Error(Loc, "immediate must be an integer in range [1, 64].");
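 // SVE add/sub and cpy immediates encode an 8-bit value with an optional
 // 'lsl #8', hence the "multiple of 256" alternative for element sizes of
 // 16 bits and wider.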
5087  case Match_InvalidSVEAddSubImm8:
5088  return Error(Loc, "immediate must be an integer in range [0, 255]"
5089  " with a shift amount of 0");
5090  case Match_InvalidSVEAddSubImm16:
5091  case Match_InvalidSVEAddSubImm32:
5092  case Match_InvalidSVEAddSubImm64:
5093  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5094  "multiple of 256 in range [256, 65280]");
5095  case Match_InvalidSVECpyImm8:
5096  return Error(Loc, "immediate must be an integer in range [-128, 255]"
5097  " with a shift amount of 0");
5098  case Match_InvalidSVECpyImm16:
5099  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5100  "multiple of 256 in range [-32768, 65280]");
5101  case Match_InvalidSVECpyImm32:
5102  case Match_InvalidSVECpyImm64:
5103  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5104  "multiple of 256 in range [-32768, 32512]");
5105  case Match_InvalidIndexRange0_0:
5106  return Error(Loc, "expected lane specifier '[0]'");
5107  case Match_InvalidIndexRange1_1:
5108  return Error(Loc, "expected lane specifier '[1]'");
5109  case Match_InvalidIndexRange0_15:
5110  return Error(Loc, "vector lane must be an integer in range [0, 15].");
5111  case Match_InvalidIndexRange0_7:
5112  return Error(Loc, "vector lane must be an integer in range [0, 7].");
5113  case Match_InvalidIndexRange0_3:
5114  return Error(Loc, "vector lane must be an integer in range [0, 3].");
5115  case Match_InvalidIndexRange0_1:
5116  return Error(Loc, "vector lane must be an integer in range [0, 1].");
5117  case Match_InvalidSVEIndexRange0_63:
5118  return Error(Loc, "vector lane must be an integer in range [0, 63].");
5119  case Match_InvalidSVEIndexRange0_31:
5120  return Error(Loc, "vector lane must be an integer in range [0, 31].");
5121  case Match_InvalidSVEIndexRange0_15:
5122  return Error(Loc, "vector lane must be an integer in range [0, 15].");
5123  case Match_InvalidSVEIndexRange0_7:
5124  return Error(Loc, "vector lane must be an integer in range [0, 7].");
5125  case Match_InvalidSVEIndexRange0_3:
5126  return Error(Loc, "vector lane must be an integer in range [0, 3].");
5127  case Match_InvalidLabel:
5128  return Error(Loc, "expected label or encodable integer pc offset");
5129  case Match_MRS:
5130  return Error(Loc, "expected readable system register");
5131  case Match_MSR:
5132  case Match_InvalidSVCR:
5133  return Error(Loc, "expected writable system register or pstate");
5134  case Match_InvalidComplexRotationEven:
5135  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5136  case Match_InvalidComplexRotationOdd:
5137  return Error(Loc, "complex rotation must be 90 or 270.");
5138  case Match_MnemonicFail: {
5139  std::string Suggestion = AArch64MnemonicSpellCheck(
5140  ((AArch64Operand &)*Operands[0]).getToken(),
5141  ComputeAvailableFeatures(STI->getFeatureBits()));
5142  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5143  }
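 // The shifted-GPR64 diagnostics below arise from SVE scalar-plus-scalar
 // addressing, where the offset register must carry an 'lsl' equal to
 // log2 of the element size in bytes.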
5144  case Match_InvalidGPR64shifted8:
5145  return Error(Loc, "register must be x0..x30 or xzr, without shift");
5146  case Match_InvalidGPR64shifted16:
5147  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5148  case Match_InvalidGPR64shifted32:
5149  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5150  case Match_InvalidGPR64shifted64:
5151  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5152  case Match_InvalidGPR64shifted128:
5153  return Error(
5154  Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5155  case Match_InvalidGPR64NoXZRshifted8:
5156  return Error(Loc, "register must be x0..x30 without shift");
5157  case Match_InvalidGPR64NoXZRshifted16:
5158  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5159  case Match_InvalidGPR64NoXZRshifted32:
5160  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5161  case Match_InvalidGPR64NoXZRshifted64:
5162  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5163  case Match_InvalidGPR64NoXZRshifted128:
5164  return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
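 // The ZPR32 diagnostics below come from SVE scalar-plus-vector
 // (gather/scatter) addressing, where the 32-bit vector offset takes a
 // (uxtw|sxtw) extend, optionally scaled by the access size.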
5165  case Match_InvalidZPR32UXTW8:
5166  case Match_InvalidZPR32SXTW8:
5167  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5168  case Match_InvalidZPR32UXTW16:
5169  case Match_InvalidZPR32SXTW16:
5170  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");