AArch64AsmParser.cpp
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/AArch64AddressingModes.h"
10 #include "MCTargetDesc/AArch64MCExpr.h"
11 #include "MCTargetDesc/AArch64MCTargetDesc.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "AArch64InstrInfo.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/MC/MCValue.h"
42 #include "llvm/Support/Casting.h"
43 #include "llvm/Support/Compiler.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/SMLoc.h"
47 #include "llvm/Support/TargetParser.h"
48 #include "llvm/Support/TargetRegistry.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include <cassert>
51 #include <cctype>
52 #include <cstdint>
53 #include <cstdio>
54 #include <string>
55 #include <tuple>
56 #include <utility>
57 #include <vector>
58 
59 using namespace llvm;
60 
61 namespace {
62 
63 enum class RegKind {
64  Scalar,
65  NeonVector,
66  SVEDataVector,
67  SVEPredicateVector
68 };
69 
70 enum class RegConstraintEqualityTy {
71  EqualsReg,
72  EqualsSuperReg,
73  EqualsSubReg
74 };
75 
76 class AArch64AsmParser : public MCTargetAsmParser {
77 private:
78  StringRef Mnemonic; ///< Instruction mnemonic.
79 
80  // Map of register aliases created via the .req directive.
81  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
82 
83  class PrefixInfo {
84  public:
85  static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
86  PrefixInfo Prefix;
87  switch (Inst.getOpcode()) {
88  case AArch64::MOVPRFX_ZZ:
89  Prefix.Active = true;
90  Prefix.Dst = Inst.getOperand(0).getReg();
91  break;
92  case AArch64::MOVPRFX_ZPmZ_B:
93  case AArch64::MOVPRFX_ZPmZ_H:
94  case AArch64::MOVPRFX_ZPmZ_S:
95  case AArch64::MOVPRFX_ZPmZ_D:
96  Prefix.Active = true;
97  Prefix.Predicated = true;
98  Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
99  assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
100  "No destructive element size set for movprfx");
101  Prefix.Dst = Inst.getOperand(0).getReg();
102  Prefix.Pg = Inst.getOperand(2).getReg();
103  break;
104  case AArch64::MOVPRFX_ZPzZ_B:
105  case AArch64::MOVPRFX_ZPzZ_H:
106  case AArch64::MOVPRFX_ZPzZ_S:
107  case AArch64::MOVPRFX_ZPzZ_D:
108  Prefix.Active = true;
109  Prefix.Predicated = true;
110  Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
111  assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
112  "No destructive element size set for movprfx");
113  Prefix.Dst = Inst.getOperand(0).getReg();
114  Prefix.Pg = Inst.getOperand(1).getReg();
115  break;
116  default:
117  break;
118  }
119 
120  return Prefix;
121  }
122 
123  PrefixInfo() : Active(false), Predicated(false) {}
124  bool isActive() const { return Active; }
125  bool isPredicated() const { return Predicated; }
126  unsigned getElementSize() const {
127  assert(Predicated);
128  return ElementSize;
129  }
130  unsigned getDstReg() const { return Dst; }
131  unsigned getPgReg() const {
132  assert(Predicated);
133  return Pg;
134  }
135 
136  private:
137  bool Active;
138  bool Predicated;
139  unsigned ElementSize;
140  unsigned Dst;
141  unsigned Pg;
142  } NextPrefix;
143 
144  AArch64TargetStreamer &getTargetStreamer() {
145  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
146  return static_cast<AArch64TargetStreamer &>(TS);
147  }
148 
149  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
150 
151  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
152  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
153  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
154  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
155  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
156  bool parseRegister(OperandVector &Operands);
157  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
158  bool parseNeonVectorList(OperandVector &Operands);
159  bool parseOptionalMulOperand(OperandVector &Operands);
160  bool parseOperand(OperandVector &Operands, bool isCondCode,
161  bool invertCondCode);
162 
163  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
164  OperandVector &Operands);
165 
166  bool parseDirectiveArch(SMLoc L);
167  bool parseDirectiveArchExtension(SMLoc L);
168  bool parseDirectiveCPU(SMLoc L);
169  bool parseDirectiveInst(SMLoc L);
170 
171  bool parseDirectiveTLSDescCall(SMLoc L);
172 
173  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
174  bool parseDirectiveLtorg(SMLoc L);
175 
176  bool parseDirectiveReq(StringRef Name, SMLoc L);
177  bool parseDirectiveUnreq(SMLoc L);
178  bool parseDirectiveCFINegateRAState();
179  bool parseDirectiveCFIBKeyFrame();
180 
181  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
182  SmallVectorImpl<SMLoc> &Loc);
183  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
184  OperandVector &Operands, MCStreamer &Out,
185  uint64_t &ErrorInfo,
186  bool MatchingInlineAsm) override;
187 /// @name Auto-generated Match Functions
188 /// {
189 
190 #define GET_ASSEMBLER_HEADER
191 #include "AArch64GenAsmMatcher.inc"
192 
193  /// }
194 
195  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
196  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
197  RegKind MatchKind);
198  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
199  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
200  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
201  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
202  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
203  template <bool IsSVEPrefetch = false>
204  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
205  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
206  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
207  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
208  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
209  template<bool AddFPZeroAsLiteral>
210  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
211  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
212  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
213  bool tryParseNeonVectorRegister(OperandVector &Operands);
214  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
215  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
216  template <bool ParseShiftExtend,
217  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
218  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
219  template <bool ParseShiftExtend, bool ParseSuffix>
220  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
221  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
222  template <RegKind VectorKind>
223  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
224  bool ExpectMatch = false);
225  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
226 
227 public:
228  enum AArch64MatchResultTy {
229  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
230 #define GET_OPERAND_DIAGNOSTIC_TYPES
231 #include "AArch64GenAsmMatcher.inc"
232  };
233  bool IsILP32;
234 
235  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
236  const MCInstrInfo &MII, const MCTargetOptions &Options)
237  : MCTargetAsmParser(Options, STI, MII) {
238  IsILP32 = Options.getABIName() == "ilp32";
239  MCAsmParserExtension::Initialize(Parser);
240  MCStreamer &S = getParser().getStreamer();
241  if (S.getTargetStreamer() == nullptr)
242  new AArch64TargetStreamer(S);
243 
244  // Alias .hword/.word/.xword to the target-independent .2byte/.4byte/.8byte
245  // directives as they have the same form and semantics:
246  /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
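 // For example, ".hword 0x1234" assembles exactly like ".2byte 0x1234".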
247  Parser.addAliasForDirective(".hword", ".2byte");
248  Parser.addAliasForDirective(".word", ".4byte");
249  Parser.addAliasForDirective(".xword", ".8byte");
250 
251  // Initialize the set of available features.
252  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
253  }
254 
255  bool regsEqual(const MCParsedAsmOperand &Op1,
256  const MCParsedAsmOperand &Op2) const override;
257  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
258  SMLoc NameLoc, OperandVector &Operands) override;
259  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
260  bool ParseDirective(AsmToken DirectiveID) override;
261  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
262  unsigned Kind) override;
263 
264  static bool classifySymbolRef(const MCExpr *Expr,
265  AArch64MCExpr::VariantKind &ELFRefKind,
266  MCSymbolRefExpr::VariantKind &DarwinRefKind,
267  int64_t &Addend);
268 };
269 
270 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
271 /// instruction.
272 class AArch64Operand : public MCParsedAsmOperand {
273 private:
274  enum KindTy {
275  k_Immediate,
276  k_ShiftedImm,
277  k_CondCode,
278  k_Register,
279  k_VectorList,
280  k_VectorIndex,
281  k_Token,
282  k_SysReg,
283  k_SysCR,
284  k_Prefetch,
285  k_ShiftExtend,
286  k_FPImm,
287  k_Barrier,
288  k_PSBHint,
289  k_BTIHint,
290  } Kind;
291 
292  SMLoc StartLoc, EndLoc;
293 
294  struct TokOp {
295  const char *Data;
296  unsigned Length;
297  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
298  };
299 
300  // Separate shift/extend operand.
301  struct ShiftExtendOp {
302  AArch64_AM::ShiftExtendType Type;
303  unsigned Amount;
304  bool HasExplicitAmount;
305  };
306 
307  struct RegOp {
308  unsigned RegNum;
309  RegKind Kind;
310  int ElementWidth;
311 
312  // The register may be allowed as a different register class,
313  // e.g. for GPR64as32 or GPR32as64.
314  RegConstraintEqualityTy EqualityTy;
315 
316  // In some cases the shift/extend needs to be explicitly parsed together
317  // with the register, rather than as a separate operand. This is needed
318  // for addressing modes where the instruction as a whole dictates the
319  // scaling/extend, rather than specific bits in the instruction.
320  // By parsing them as a single operand, we avoid the need to pass an
321  // extra operand in all CodeGen patterns (because all operands need to
322  // have an associated value), and we avoid the need to update TableGen to
323  // accept operands that have no associated bits in the instruction.
324  //
325  // An added benefit of parsing them together is that the assembler
326  // can give a sensible diagnostic if the scaling is not correct.
327  //
328  // The default is 'lsl #0' (HasExplicitAmount = false) if no
329  // ShiftExtend is specified.
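 // For example, in "ldr x0, [x1, w2, sxtw #3]" the "w2, sxtw #3" part is
 // parsed as a single register operand, so the #3 amount can be checked
 // against the 8-byte scale implied by the 64-bit load.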
330  ShiftExtendOp ShiftExtend;
331  };
332 
333  struct VectorListOp {
334  unsigned RegNum;
335  unsigned Count;
336  unsigned NumElements;
337  unsigned ElementWidth;
338  RegKind RegisterKind;
339  };
340 
341  struct VectorIndexOp {
342  unsigned Val;
343  };
344 
345  struct ImmOp {
346  const MCExpr *Val;
347  };
348 
349  struct ShiftedImmOp {
350  const MCExpr *Val;
351  unsigned ShiftAmount;
352  };
353 
354  struct CondCodeOp {
355  AArch64CC::CondCode Code;
356  };
357 
358  struct FPImmOp {
359  uint64_t Val; // APFloat value bitcasted to uint64_t.
360  bool IsExact; // describes whether parsed value was exact.
361  };
362 
363  struct BarrierOp {
364  const char *Data;
365  unsigned Length;
366  unsigned Val; // Not the enum since not all values have names.
367  };
368 
369  struct SysRegOp {
370  const char *Data;
371  unsigned Length;
372  uint32_t MRSReg;
373  uint32_t MSRReg;
374  uint32_t PStateField;
375  };
376 
377  struct SysCRImmOp {
378  unsigned Val;
379  };
380 
381  struct PrefetchOp {
382  const char *Data;
383  unsigned Length;
384  unsigned Val;
385  };
386 
387  struct PSBHintOp {
388  const char *Data;
389  unsigned Length;
390  unsigned Val;
391  };
392 
393  struct BTIHintOp {
394  const char *Data;
395  unsigned Length;
396  unsigned Val;
397  };
398 
399  struct ExtendOp {
400  unsigned Val;
401  };
402 
403  union {
404  struct TokOp Tok;
405  struct RegOp Reg;
406  struct VectorListOp VectorList;
407  struct VectorIndexOp VectorIndex;
408  struct ImmOp Imm;
409  struct ShiftedImmOp ShiftedImm;
410  struct CondCodeOp CondCode;
411  struct FPImmOp FPImm;
412  struct BarrierOp Barrier;
413  struct SysRegOp SysReg;
414  struct SysCRImmOp SysCRImm;
415  struct PrefetchOp Prefetch;
416  struct PSBHintOp PSBHint;
417  struct BTIHintOp BTIHint;
418  struct ShiftExtendOp ShiftExtend;
419  };
420 
421  // Keep the MCContext around as the MCExprs may need to be manipulated
422  // during the add<>Operands() calls.
423  MCContext &Ctx;
424 
425 public:
426  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
427 
428  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
429  Kind = o.Kind;
430  StartLoc = o.StartLoc;
431  EndLoc = o.EndLoc;
432  switch (Kind) {
433  case k_Token:
434  Tok = o.Tok;
435  break;
436  case k_Immediate:
437  Imm = o.Imm;
438  break;
439  case k_ShiftedImm:
440  ShiftedImm = o.ShiftedImm;
441  break;
442  case k_CondCode:
443  CondCode = o.CondCode;
444  break;
445  case k_FPImm:
446  FPImm = o.FPImm;
447  break;
448  case k_Barrier:
449  Barrier = o.Barrier;
450  break;
451  case k_Register:
452  Reg = o.Reg;
453  break;
454  case k_VectorList:
455  VectorList = o.VectorList;
456  break;
457  case k_VectorIndex:
458  VectorIndex = o.VectorIndex;
459  break;
460  case k_SysReg:
461  SysReg = o.SysReg;
462  break;
463  case k_SysCR:
464  SysCRImm = o.SysCRImm;
465  break;
466  case k_Prefetch:
467  Prefetch = o.Prefetch;
468  break;
469  case k_PSBHint:
470  PSBHint = o.PSBHint;
471  break;
472  case k_BTIHint:
473  BTIHint = o.BTIHint;
474  break;
475  case k_ShiftExtend:
476  ShiftExtend = o.ShiftExtend;
477  break;
478  }
479  }
480 
481  /// getStartLoc - Get the location of the first token of this operand.
482  SMLoc getStartLoc() const override { return StartLoc; }
483  /// getEndLoc - Get the location of the last token of this operand.
484  SMLoc getEndLoc() const override { return EndLoc; }
485 
486  StringRef getToken() const {
487  assert(Kind == k_Token && "Invalid access!");
488  return StringRef(Tok.Data, Tok.Length);
489  }
490 
491  bool isTokenSuffix() const {
492  assert(Kind == k_Token && "Invalid access!");
493  return Tok.IsSuffix;
494  }
495 
496  const MCExpr *getImm() const {
497  assert(Kind == k_Immediate && "Invalid access!");
498  return Imm.Val;
499  }
500 
501  const MCExpr *getShiftedImmVal() const {
502  assert(Kind == k_ShiftedImm && "Invalid access!");
503  return ShiftedImm.Val;
504  }
505 
506  unsigned getShiftedImmShift() const {
507  assert(Kind == k_ShiftedImm && "Invalid access!");
508  return ShiftedImm.ShiftAmount;
509  }
510 
511  AArch64CC::CondCode getCondCode() const {
512  assert(Kind == k_CondCode && "Invalid access!");
513  return CondCode.Code;
514  }
515 
516  APFloat getFPImm() const {
517  assert (Kind == k_FPImm && "Invalid access!");
518  return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
519  }
520 
521  bool getFPImmIsExact() const {
522  assert (Kind == k_FPImm && "Invalid access!");
523  return FPImm.IsExact;
524  }
525 
526  unsigned getBarrier() const {
527  assert(Kind == k_Barrier && "Invalid access!");
528  return Barrier.Val;
529  }
530 
531  StringRef getBarrierName() const {
532  assert(Kind == k_Barrier && "Invalid access!");
533  return StringRef(Barrier.Data, Barrier.Length);
534  }
535 
536  unsigned getReg() const override {
537  assert(Kind == k_Register && "Invalid access!");
538  return Reg.RegNum;
539  }
540 
541  RegConstraintEqualityTy getRegEqualityTy() const {
542  assert(Kind == k_Register && "Invalid access!");
543  return Reg.EqualityTy;
544  }
545 
546  unsigned getVectorListStart() const {
547  assert(Kind == k_VectorList && "Invalid access!");
548  return VectorList.RegNum;
549  }
550 
551  unsigned getVectorListCount() const {
552  assert(Kind == k_VectorList && "Invalid access!");
553  return VectorList.Count;
554  }
555 
556  unsigned getVectorIndex() const {
557  assert(Kind == k_VectorIndex && "Invalid access!");
558  return VectorIndex.Val;
559  }
560 
561  StringRef getSysReg() const {
562  assert(Kind == k_SysReg && "Invalid access!");
563  return StringRef(SysReg.Data, SysReg.Length);
564  }
565 
566  unsigned getSysCR() const {
567  assert(Kind == k_SysCR && "Invalid access!");
568  return SysCRImm.Val;
569  }
570 
571  unsigned getPrefetch() const {
572  assert(Kind == k_Prefetch && "Invalid access!");
573  return Prefetch.Val;
574  }
575 
576  unsigned getPSBHint() const {
577  assert(Kind == k_PSBHint && "Invalid access!");
578  return PSBHint.Val;
579  }
580 
581  StringRef getPSBHintName() const {
582  assert(Kind == k_PSBHint && "Invalid access!");
583  return StringRef(PSBHint.Data, PSBHint.Length);
584  }
585 
586  unsigned getBTIHint() const {
587  assert(Kind == k_BTIHint && "Invalid access!");
588  return BTIHint.Val;
589  }
590 
591  StringRef getBTIHintName() const {
592  assert(Kind == k_BTIHint && "Invalid access!");
593  return StringRef(BTIHint.Data, BTIHint.Length);
594  }
595 
596  StringRef getPrefetchName() const {
597  assert(Kind == k_Prefetch && "Invalid access!");
598  return StringRef(Prefetch.Data, Prefetch.Length);
599  }
600 
601  AArch64_AM::ShiftExtendType getShiftExtendType() const {
602  if (Kind == k_ShiftExtend)
603  return ShiftExtend.Type;
604  if (Kind == k_Register)
605  return Reg.ShiftExtend.Type;
606  llvm_unreachable("Invalid access!");
607  }
608 
609  unsigned getShiftExtendAmount() const {
610  if (Kind == k_ShiftExtend)
611  return ShiftExtend.Amount;
612  if (Kind == k_Register)
613  return Reg.ShiftExtend.Amount;
614  llvm_unreachable("Invalid access!");
615  }
616 
617  bool hasShiftExtendAmount() const {
618  if (Kind == k_ShiftExtend)
619  return ShiftExtend.HasExplicitAmount;
620  if (Kind == k_Register)
621  return Reg.ShiftExtend.HasExplicitAmount;
622  llvm_unreachable("Invalid access!");
623  }
624 
625  bool isImm() const override { return Kind == k_Immediate; }
626  bool isMem() const override { return false; }
627 
628  bool isUImm6() const {
629  if (!isImm())
630  return false;
631  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632  if (!MCE)
633  return false;
634  int64_t Val = MCE->getValue();
635  return (Val >= 0 && Val < 64);
636  }
637 
638  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
639 
640  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
641  return isImmScaled<Bits, Scale>(true);
642  }
643 
644  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
645  return isImmScaled<Bits, Scale>(false);
646  }
647 
648  template <int Bits, int Scale>
649  DiagnosticPredicate isImmScaled(bool Signed) const {
650  if (!isImm())
651  return DiagnosticPredicateTy::NoMatch;
652 
653  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
654  if (!MCE)
655  return DiagnosticPredicateTy::NoMatch;
656 
657  int64_t MinVal, MaxVal;
658  if (Signed) {
659  int64_t Shift = Bits - 1;
660  MinVal = (int64_t(1) << Shift) * -Scale;
661  MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
662  } else {
663  MinVal = 0;
664  MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
665  }
666 
667  int64_t Val = MCE->getValue();
668  if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
669  return DiagnosticPredicateTy::Match;
670 
671  return DiagnosticPredicateTy::NearMatch;
672  }
673 
674  DiagnosticPredicate isSVEPattern() const {
675  if (!isImm())
676  return DiagnosticPredicateTy::NoMatch;
677  auto *MCE = dyn_cast<MCConstantExpr>(getImm());
678  if (!MCE)
679  return DiagnosticPredicateTy::NoMatch;
680  int64_t Val = MCE->getValue();
681  if (Val >= 0 && Val < 32)
682  return DiagnosticPredicateTy::Match;
683  return DiagnosticPredicateTy::NoMatch;
684  }
685 
686  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
687  AArch64MCExpr::VariantKind ELFRefKind;
688  MCSymbolRefExpr::VariantKind DarwinRefKind;
689  int64_t Addend;
690  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
691  Addend)) {
692  // If we don't understand the expression, assume the best and
693  // let the fixup and relocation code deal with it.
694  return true;
695  }
696 
697  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
698  ELFRefKind == AArch64MCExpr::VK_LO12 ||
699  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
700  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
701  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
702  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
703  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
704  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
705  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
706  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
707  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
708  // Note that we don't range-check the addend. It's adjusted modulo page
709  // size when converted, so there is no "out of range" condition when using
710  // @pageoff.
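 // For example, "add x0, x0, :lo12:sym+16" keeps the +16 addend for the
 // fixup to fold in.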
711  return true;
712  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
713  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
714  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
715  return Addend == 0;
716  }
717 
718  return false;
719  }
720 
721  template <int Scale> bool isUImm12Offset() const {
722  if (!isImm())
723  return false;
724 
725  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726  if (!MCE)
727  return isSymbolicUImm12Offset(getImm());
728 
729  int64_t Val = MCE->getValue();
730  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
731  }
732 
733  template <int N, int M>
734  bool isImmInRange() const {
735  if (!isImm())
736  return false;
737  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
738  if (!MCE)
739  return false;
740  int64_t Val = MCE->getValue();
741  return (Val >= N && Val <= M);
742  }
743 
744  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
745  // a logical immediate can always be represented when inverted.
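 // For example, 0x00ff00ff00ff00ff is a valid 64-bit logical immediate (a
 // repeating 16-bit pattern), and so is its bitwise NOT 0xff00ff00ff00ff00.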
746  template <typename T>
747  bool isLogicalImm() const {
748  if (!isImm())
749  return false;
750  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
751  if (!MCE)
752  return false;
753 
754  int64_t Val = MCE->getValue();
755  int64_t SVal = typename std::make_signed<T>::type(Val);
756  int64_t UVal = typename std::make_unsigned<T>::type(Val);
757  if (Val != SVal && Val != UVal)
758  return false;
759 
760  return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
761  }
762 
763  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
764 
765  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
766  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
767  /// immediate that can be shifted by 'Shift'.
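 /// For example, with Width == 12 the constant 0x3000 yields (3, 12), while
 /// the constant 5 yields (5, 0).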
768  template <unsigned Width>
769  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
770  if (isShiftedImm() && Width == getShiftedImmShift())
771  if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
772  return std::make_pair(CE->getValue(), Width);
773 
774  if (isImm())
775  if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
776  int64_t Val = CE->getValue();
777  if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
778  return std::make_pair(Val >> Width, Width);
779  else
780  return std::make_pair(Val, 0u);
781  }
782 
783  return {};
784  }
785 
786  bool isAddSubImm() const {
787  if (!isShiftedImm() && !isImm())
788  return false;
789 
790  const MCExpr *Expr;
791 
792  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
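 // For example, "add x0, x1, #1, lsl #12" adds 4096 to x1.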
793  if (isShiftedImm()) {
794  unsigned Shift = ShiftedImm.ShiftAmount;
795  Expr = ShiftedImm.Val;
796  if (Shift != 0 && Shift != 12)
797  return false;
798  } else {
799  Expr = getImm();
800  }
801 
802  AArch64MCExpr::VariantKind ELFRefKind;
803  MCSymbolRefExpr::VariantKind DarwinRefKind;
804  int64_t Addend;
805  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
806  DarwinRefKind, Addend)) {
807  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
808  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
809  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
810  || ELFRefKind == AArch64MCExpr::VK_LO12
811  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
812  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
813  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
814  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
815  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
816  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
817  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
818  || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
819  || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
820  }
821 
822  // If it's a constant, it should be a real immediate in range.
823  if (auto ShiftedVal = getShiftedVal<12>())
824  return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
825 
826  // If it's an expression, we hope for the best and let the fixup/relocation
827  // code deal with it.
828  return true;
829  }
830 
831  bool isAddSubImmNeg() const {
832  if (!isShiftedImm() && !isImm())
833  return false;
834 
835  // Otherwise it should be a real negative immediate in range.
836  if (auto ShiftedVal = getShiftedVal<12>())
837  return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
838 
839  return false;
840  }
841 
842  // Signed value in the range -128 to +127. For element widths of
843  // 16 bits or higher it may also be a signed multiple of 256 in the
844  // range -32768 to +32512.
845  // For element-width of 8 bits a range of -128 to 255 is accepted,
846  // since a copy of a byte can be either signed/unsigned.
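 // For example, #255 is accepted for .b elements, and #256 (encoded as
 // #1, lsl #8) is accepted for .h elements and wider.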
847  template <typename T>
848  DiagnosticPredicate isSVECpyImm() const {
849  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
850  return DiagnosticPredicateTy::NoMatch;
851 
852  bool IsByte =
853  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
854  if (auto ShiftedImm = getShiftedVal<8>())
855  if (!(IsByte && ShiftedImm->second) &&
856  AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
857  << ShiftedImm->second))
858  return DiagnosticPredicateTy::Match;
859 
860  return DiagnosticPredicateTy::NoMatch;
861  }
862 
863  // Unsigned value in the range 0 to 255. For element widths of
864  // 16 bits or higher it may also be a signed multiple of 256 in the
865  // range 0 to 65280.
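 // For example, #512 is accepted for .h elements and wider, encoded as
 // #2 with an lsl #8 shift.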
866  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
867  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
868  return DiagnosticPredicateTy::NoMatch;
869 
870  bool IsByte =
871  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
872  if (auto ShiftedImm = getShiftedVal<8>())
873  if (!(IsByte && ShiftedImm->second) &&
874  AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
875  << ShiftedImm->second))
876  return DiagnosticPredicateTy::Match;
877 
878  return DiagnosticPredicateTy::NoMatch;
879  }
880 
881  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
882  if (isLogicalImm<T>() && !isSVECpyImm<T>())
883  return DiagnosticPredicateTy::Match;
884  return DiagnosticPredicateTy::NoMatch;
885  }
886 
887  bool isCondCode() const { return Kind == k_CondCode; }
888 
889  bool isSIMDImmType10() const {
890  if (!isImm())
891  return false;
892  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
893  if (!MCE)
894  return false;
895  return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
896  }
897 
898  template<int N>
899  bool isBranchTarget() const {
900  if (!isImm())
901  return false;
902  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
903  if (!MCE)
904  return true;
905  int64_t Val = MCE->getValue();
906  if (Val & 0x3)
907  return false;
908  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
909  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
910  }
911 
912  bool
913  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
914  if (!isImm())
915  return false;
916 
917  AArch64MCExpr::VariantKind ELFRefKind;
918  MCSymbolRefExpr::VariantKind DarwinRefKind;
919  int64_t Addend;
920  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
921  DarwinRefKind, Addend)) {
922  return false;
923  }
924  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
925  return false;
926 
927  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
928  if (ELFRefKind == AllowedModifiers[i])
929  return true;
930  }
931 
932  return false;
933  }
934 
935  bool isMovZSymbolG3() const {
936  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
937  }
938 
939  bool isMovZSymbolG2() const {
940  return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
941  AArch64MCExpr::VK_TPREL_G2,
942  AArch64MCExpr::VK_DTPREL_G2});
943  }
944 
945  bool isMovZSymbolG1() const {
946  return isMovWSymbol({
947  AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
948  AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
949  AArch64MCExpr::VK_DTPREL_G1,
950  });
951  }
952 
953  bool isMovZSymbolG0() const {
954  return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
955  AArch64MCExpr::VK_TPREL_G0,
956  AArch64MCExpr::VK_DTPREL_G0});
957  }
958 
959  bool isMovKSymbolG3() const {
960  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
961  }
962 
963  bool isMovKSymbolG2() const {
964  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
965  }
966 
967  bool isMovKSymbolG1() const {
968  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
969  AArch64MCExpr::VK_TPREL_G1_NC,
970  AArch64MCExpr::VK_DTPREL_G1_NC});
971  }
972 
973  bool isMovKSymbolG0() const {
974  return isMovWSymbol(
975  {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
976  AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
977  }
978 
979  template<int RegWidth, int Shift>
980  bool isMOVZMovAlias() const {
981  if (!isImm()) return false;
982 
983  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
984  if (!CE) return false;
985  uint64_t Value = CE->getValue();
986 
987  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
988  }
989 
990  template<int RegWidth, int Shift>
991  bool isMOVNMovAlias() const {
992  if (!isImm()) return false;
993 
994  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
995  if (!CE) return false;
996  uint64_t Value = CE->getValue();
997 
998  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
999  }
1000 
1001  bool isFPImm() const {
1002  return Kind == k_FPImm &&
1003  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1004  }
1005 
1006  bool isBarrier() const { return Kind == k_Barrier; }
1007  bool isSysReg() const { return Kind == k_SysReg; }
1008 
1009  bool isMRSSystemRegister() const {
1010  if (!isSysReg()) return false;
1011 
1012  return SysReg.MRSReg != -1U;
1013  }
1014 
1015  bool isMSRSystemRegister() const {
1016  if (!isSysReg()) return false;
1017  return SysReg.MSRReg != -1U;
1018  }
1019 
1020  bool isSystemPStateFieldWithImm0_1() const {
1021  if (!isSysReg()) return false;
1022  return (SysReg.PStateField == AArch64PState::PAN ||
1023  SysReg.PStateField == AArch64PState::DIT ||
1024  SysReg.PStateField == AArch64PState::UAO ||
1025  SysReg.PStateField == AArch64PState::SSBS);
1026  }
1027 
1028  bool isSystemPStateFieldWithImm0_15() const {
1029  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1030  return SysReg.PStateField != -1U;
1031  }
1032 
1033  bool isReg() const override {
1034  return Kind == k_Register;
1035  }
1036 
1037  bool isScalarReg() const {
1038  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1039  }
1040 
1041  bool isNeonVectorReg() const {
1042  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1043  }
1044 
1045  bool isNeonVectorRegLo() const {
1046  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1047  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1048  Reg.RegNum);
1049  }
1050 
1051  template <unsigned Class> bool isSVEVectorReg() const {
1052  RegKind RK;
1053  switch (Class) {
1054  case AArch64::ZPRRegClassID:
1055  case AArch64::ZPR_3bRegClassID:
1056  case AArch64::ZPR_4bRegClassID:
1057  RK = RegKind::SVEDataVector;
1058  break;
1059  case AArch64::PPRRegClassID:
1060  case AArch64::PPR_3bRegClassID:
1061  RK = RegKind::SVEPredicateVector;
1062  break;
1063  default:
1064  llvm_unreachable("Unsupported register class");
1065  }
1066 
1067  return (Kind == k_Register && Reg.Kind == RK) &&
1068  AArch64MCRegisterClasses[Class].contains(getReg());
1069  }
1070 
1071  template <unsigned Class> bool isFPRasZPR() const {
1072  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1073  AArch64MCRegisterClasses[Class].contains(getReg());
1074  }
1075 
1076  template <int ElementWidth, unsigned Class>
1077  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1078  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1079  return DiagnosticPredicateTy::NoMatch;
1080 
1081  if (isSVEVectorReg<Class>() &&
1082  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1083  return DiagnosticPredicateTy::Match;
1084 
1085  return DiagnosticPredicateTy::NearMatch;
1086  }
1087 
1088  template <int ElementWidth, unsigned Class>
1089  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1090  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1091  return DiagnosticPredicateTy::NoMatch;
1092 
1093  if (isSVEVectorReg<Class>() &&
1094  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1095  return DiagnosticPredicateTy::Match;
1096 
1097  return DiagnosticPredicateTy::NearMatch;
1098  }
1099 
1100  template <int ElementWidth, unsigned Class,
1101  AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1102  bool ShiftWidthAlwaysSame>
1103  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1104  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1105  if (!VectorMatch.isMatch())
1106  return DiagnosticPredicateTy::NoMatch;
1107 
1108  // Give a more specific diagnostic when the user has explicitly typed in
1109  // a shift-amount that does not match what is expected, but for which
1110  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
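 // For example, "uxtw #1" typed where the element size requires "uxtw #2"
 // (or a plain "uxtw" for the unscaled form).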
1111  bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1112  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1113  ShiftExtendTy == AArch64_AM::SXTW) &&
1114  !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1115  return DiagnosticPredicateTy::NoMatch;
1116 
1117  if (MatchShift && ShiftExtendTy == getShiftExtendType())
1118  return DiagnosticPredicateTy::Match;
1119 
1120  return DiagnosticPredicateTy::NoMatch;
1121  }
1122 
1123  bool isGPR32as64() const {
1124  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1125  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1126  }
1127 
1128  bool isGPR64as32() const {
1129  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1130  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1131  }
1132 
1133  bool isWSeqPair() const {
1134  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1135  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1136  Reg.RegNum);
1137  }
1138 
1139  bool isXSeqPair() const {
1140  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1141  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1142  Reg.RegNum);
1143  }
1144 
1145  template<int64_t Angle, int64_t Remainder>
1146  DiagnosticPredicate isComplexRotation() const {
1147  if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1148 
1149  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1150  if (!CE) return DiagnosticPredicateTy::NoMatch;
1151  uint64_t Value = CE->getValue();
1152 
1153  if (Value % Angle == Remainder && Value <= 270)
1154  return DiagnosticPredicateTy::Match;
1155  return DiagnosticPredicateTy::NearMatch;
1156  }
1157 
1158  template <unsigned RegClassID> bool isGPR64() const {
1159  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1160  AArch64MCRegisterClasses[RegClassID].contains(getReg());
1161  }
1162 
1163  template <unsigned RegClassID, int ExtWidth>
1164  DiagnosticPredicate isGPR64WithShiftExtend() const {
1165  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1166  return DiagnosticPredicateTy::NoMatch;
1167 
1168  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1169  getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1170  return DiagnosticPredicateTy::Match;
1171  return DiagnosticPredicateTy::NoMatch;
1172  }
1173 
1174  /// Is this a vector list with the type implicit (presumably attached to the
1175  /// instruction itself)?
1176  template <RegKind VectorKind, unsigned NumRegs>
1177  bool isImplicitlyTypedVectorList() const {
1178  return Kind == k_VectorList && VectorList.Count == NumRegs &&
1179  VectorList.NumElements == 0 &&
1180  VectorList.RegisterKind == VectorKind;
1181  }
1182 
1183  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1184  unsigned ElementWidth>
1185  bool isTypedVectorList() const {
1186  if (Kind != k_VectorList)
1187  return false;
1188  if (VectorList.Count != NumRegs)
1189  return false;
1190  if (VectorList.RegisterKind != VectorKind)
1191  return false;
1192  if (VectorList.ElementWidth != ElementWidth)
1193  return false;
1194  return VectorList.NumElements == NumElements;
1195  }
1196 
1197  template <int Min, int Max>
1198  DiagnosticPredicate isVectorIndex() const {
1199  if (Kind != k_VectorIndex)
1200  return DiagnosticPredicateTy::NoMatch;
1201  if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1202  return DiagnosticPredicateTy::Match;
1203  return DiagnosticPredicateTy::NoMatch;
1204  }
1205 
1206  bool isToken() const override { return Kind == k_Token; }
1207 
1208  bool isTokenEqual(StringRef Str) const {
1209  return Kind == k_Token && getToken() == Str;
1210  }
1211  bool isSysCR() const { return Kind == k_SysCR; }
1212  bool isPrefetch() const { return Kind == k_Prefetch; }
1213  bool isPSBHint() const { return Kind == k_PSBHint; }
1214  bool isBTIHint() const { return Kind == k_BTIHint; }
1215  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1216  bool isShifter() const {
1217  if (!isShiftExtend())
1218  return false;
1219 
1220  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1221  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1222  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1223  ST == AArch64_AM::MSL);
1224  }
1225 
1226  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1227  if (Kind != k_FPImm)
1228  return DiagnosticPredicateTy::NoMatch;
1229 
1230  if (getFPImmIsExact()) {
1231  // Lookup the immediate from table of supported immediates.
1232  auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1233  assert(Desc && "Unknown enum value");
1234 
1235  // Calculate its FP value.
1236  APFloat RealVal(APFloat::IEEEdouble());
1237  if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1238  APFloat::opOK)
1239  llvm_unreachable("FP immediate is not exact");
1240 
1241  if (getFPImm().bitwiseIsEqual(RealVal))
1242  return DiagnosticPredicateTy::Match;
1243  }
1244 
1245  return DiagnosticPredicateTy::NearMatch;
1246  }
1247 
1248  template <unsigned ImmA, unsigned ImmB>
1249  DiagnosticPredicate isExactFPImm() const {
1250  DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1251  if ((Res = isExactFPImm<ImmA>()))
1252  return DiagnosticPredicateTy::Match;
1253  if ((Res = isExactFPImm<ImmB>()))
1254  return DiagnosticPredicateTy::Match;
1255  return Res;
1256  }
1257 
1258  bool isExtend() const {
1259  if (!isShiftExtend())
1260  return false;
1261 
1262  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1263  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1264  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1265  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1266  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1267  ET == AArch64_AM::LSL) &&
1268  getShiftExtendAmount() <= 4;
1269  }
1270 
1271  bool isExtend64() const {
1272  if (!isExtend())
1273  return false;
1274  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1275  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1276  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1277  }
1278 
1279  bool isExtendLSL64() const {
1280  if (!isExtend())
1281  return false;
1282  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1283  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1284  ET == AArch64_AM::LSL) &&
1285  getShiftExtendAmount() <= 4;
1286  }
1287 
1288  template<int Width> bool isMemXExtend() const {
1289  if (!isExtend())
1290  return false;
1291  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1292  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1293  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1294  getShiftExtendAmount() == 0);
1295  }
1296 
1297  template<int Width> bool isMemWExtend() const {
1298  if (!isExtend())
1299  return false;
1300  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1301  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1302  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1303  getShiftExtendAmount() == 0);
1304  }
1305 
1306  template <unsigned width>
1307  bool isArithmeticShifter() const {
1308  if (!isShifter())
1309  return false;
1310 
1311  // An arithmetic shifter is LSL, LSR, or ASR.
1312  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1313  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1314  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1315  }
1316 
1317  template <unsigned width>
1318  bool isLogicalShifter() const {
1319  if (!isShifter())
1320  return false;
1321 
1322  // A logical shifter is LSL, LSR, ASR or ROR.
1323  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1324  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1325  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1326  getShiftExtendAmount() < width;
1327  }
1328 
1329  bool isMovImm32Shifter() const {
1330  if (!isShifter())
1331  return false;
1332 
1333  // A MOVi shifter is LSL of 0, 16, 32, or 48.
1334  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1335  if (ST != AArch64_AM::LSL)
1336  return false;
1337  uint64_t Val = getShiftExtendAmount();
1338  return (Val == 0 || Val == 16);
1339  }
1340 
1341  bool isMovImm64Shifter() const {
1342  if (!isShifter())
1343  return false;
1344 
1345  // A MOVi shifter is LSL of 0 or 16.
1346  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1347  if (ST != AArch64_AM::LSL)
1348  return false;
1349  uint64_t Val = getShiftExtendAmount();
1350  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1351  }
1352 
1353  bool isLogicalVecShifter() const {
1354  if (!isShifter())
1355  return false;
1356 
1357  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1358  unsigned Shift = getShiftExtendAmount();
1359  return getShiftExtendType() == AArch64_AM::LSL &&
1360  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1361  }
1362 
1363  bool isLogicalVecHalfWordShifter() const {
1364  if (!isLogicalVecShifter())
1365  return false;
1366 
1367  // A logical vector shifter is a left shift by 0 or 8.
1368  unsigned Shift = getShiftExtendAmount();
1369  return getShiftExtendType() == AArch64_AM::LSL &&
1370  (Shift == 0 || Shift == 8);
1371  }
1372 
1373  bool isMoveVecShifter() const {
1374  if (!isShiftExtend())
1375  return false;
1376 
1377  // A logical vector shifter is a left shift by 8 or 16.
1378  unsigned Shift = getShiftExtendAmount();
1379  return getShiftExtendType() == AArch64_AM::MSL &&
1380  (Shift == 8 || Shift == 16);
1381  }
1382 
1383  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1384  // to LDUR/STUR when the offset is not legal for the former but is for
1385  // the latter. As such, in addition to checking for being a legal unscaled
1386  // address, also check that it is not a legal scaled address. This avoids
1387  // ambiguity in the matcher.
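 // For example, "ldr x0, [x1, #-8]" has no legal scaled encoding, so it
 // matches the fallback operand and is emitted as LDUR, while
 // "ldr x0, [x1, #8]" keeps the scaled LDR encoding.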
1388  template<int Width>
1389  bool isSImm9OffsetFB() const {
1390  return isSImm<9>() && !isUImm12Offset<Width / 8>();
1391  }
1392 
1393  bool isAdrpLabel() const {
1394  // Validation was handled during parsing, so we just sanity check that
1395  // something didn't go haywire.
1396  if (!isImm())
1397  return false;
1398 
1399  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1400  int64_t Val = CE->getValue();
1401  int64_t Min = - (4096 * (1LL << (21 - 1)));
1402  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1403  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1404  }
1405 
1406  return true;
1407  }
1408 
1409  bool isAdrLabel() const {
1410  // Validation was handled during parsing, so we just sanity check that
1411  // something didn't go haywire.
1412  if (!isImm())
1413  return false;
1414 
1415  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1416  int64_t Val = CE->getValue();
1417  int64_t Min = - (1LL << (21 - 1));
1418  int64_t Max = ((1LL << (21 - 1)) - 1);
1419  return Val >= Min && Val <= Max;
1420  }
1421 
1422  return true;
1423  }
1424 
1425  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1426  // Add as immediates when possible. Null MCExpr = 0.
1427  if (!Expr)
1428  Inst.addOperand(MCOperand::createImm(0));
1429  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1430  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1431  else
1432  Inst.addOperand(MCOperand::createExpr(Expr));
1433  }
1434 
1435  void addRegOperands(MCInst &Inst, unsigned N) const {
1436  assert(N == 1 && "Invalid number of operands!");
1437  Inst.addOperand(MCOperand::createReg(getReg()));
1438  }
1439 
1440  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1441  assert(N == 1 && "Invalid number of operands!");
1442  assert(
1443  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1444 
1445  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1446  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1447  RI->getEncodingValue(getReg()));
1448 
1449  Inst.addOperand(MCOperand::createReg(Reg));
1450  }
1451 
1452  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1453  assert(N == 1 && "Invalid number of operands!");
1454  assert(
1455  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1456 
1457  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1458  uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1459  RI->getEncodingValue(getReg()));
1460 
1461  Inst.addOperand(MCOperand::createReg(Reg));
1462  }
1463 
1464  template <int Width>
1465  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1466  unsigned Base;
1467  switch (Width) {
1468  case 8: Base = AArch64::B0; break;
1469  case 16: Base = AArch64::H0; break;
1470  case 32: Base = AArch64::S0; break;
1471  case 64: Base = AArch64::D0; break;
1472  case 128: Base = AArch64::Q0; break;
1473  default:
1474  llvm_unreachable("Unsupported width");
1475  }
1476  Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1477  }
1478 
1479  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1480  assert(N == 1 && "Invalid number of operands!");
1481  assert(
1482  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1483  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1484  }
1485 
1486  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1487  assert(N == 1 && "Invalid number of operands!");
1488  assert(
1489  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1490  Inst.addOperand(MCOperand::createReg(getReg()));
1491  }
1492 
1493  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1494  assert(N == 1 && "Invalid number of operands!");
1495  Inst.addOperand(MCOperand::createReg(getReg()));
1496  }
1497 
1498  enum VecListIndexType {
1499  VecListIdx_DReg = 0,
1500  VecListIdx_QReg = 1,
1501  VecListIdx_ZReg = 2,
1502  };
1503 
1504  template <VecListIndexType RegTy, unsigned NumRegs>
1505  void addVectorListOperands(MCInst &Inst, unsigned N) const {
1506  assert(N == 1 && "Invalid number of operands!");
1507  static const unsigned FirstRegs[][5] = {
1508  /* DReg */ { AArch64::Q0,
1509  AArch64::D0, AArch64::D0_D1,
1510  AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1511  /* QReg */ { AArch64::Q0,
1512  AArch64::Q0, AArch64::Q0_Q1,
1513  AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1514  /* ZReg */ { AArch64::Z0,
1515  AArch64::Z0, AArch64::Z0_Z1,
1516  AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1517  };
1518 
1519  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1520  " NumRegs must be <= 4 for ZRegs");
1521 
1522  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1523  Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1524  FirstRegs[(unsigned)RegTy][0]));
1525  }
1526 
1527  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1528  assert(N == 1 && "Invalid number of operands!");
1529  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1530  }
1531 
1532  template <unsigned ImmIs0, unsigned ImmIs1>
1533  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1534  assert(N == 1 && "Invalid number of operands!");
1535  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1536  Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1537  }
1538 
1539  void addImmOperands(MCInst &Inst, unsigned N) const {
1540  assert(N == 1 && "Invalid number of operands!");
1541  // If this is a pageoff symrefexpr with an addend, adjust the addend
1542  // to be only the page-offset portion. Otherwise, just add the expr
1543  // as-is.
1544  addExpr(Inst, getImm());
1545  }
1546 
1547  template <int Shift>
1548  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1549  assert(N == 2 && "Invalid number of operands!");
1550  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1551  Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1552  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1553  } else if (isShiftedImm()) {
1554  addExpr(Inst, getShiftedImmVal());
1555  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1556  } else {
1557  addExpr(Inst, getImm());
1558  Inst.addOperand(MCOperand::createImm(0));
1559  }
1560  }
1561 
1562  template <int Shift>
1563  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1564  assert(N == 2 && "Invalid number of operands!");
1565  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1566  Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1567  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1568  } else
1569  llvm_unreachable("Not a shifted negative immediate");
1570  }
1571 
1572  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1573  assert(N == 1 && "Invalid number of operands!");
1574  Inst.addOperand(MCOperand::createImm(getCondCode()));
1575  }
1576 
1577  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1578  assert(N == 1 && "Invalid number of operands!");
1579  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1580  if (!MCE)
1581  addExpr(Inst, getImm());
1582  else
1583  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1584  }
1585 
1586  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1587  addImmOperands(Inst, N);
1588  }
1589 
1590  template<int Scale>
1591  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1592  assert(N == 1 && "Invalid number of operands!");
1593  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1594 
1595  if (!MCE) {
1596  Inst.addOperand(MCOperand::createExpr(getImm()));
1597  return;
1598  }
1599  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1600  }
1601 
1602  void addUImm6Operands(MCInst &Inst, unsigned N) const {
1603  assert(N == 1 && "Invalid number of operands!");
1604  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1605  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1606  }
1607 
1608  template <int Scale>
1609  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1610  assert(N == 1 && "Invalid number of operands!");
1611  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1612  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1613  }
1614 
1615  template <typename T>
1616  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1617  assert(N == 1 && "Invalid number of operands!");
1618  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1619  typename std::make_unsigned<T>::type Val = MCE->getValue();
1620  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1621  Inst.addOperand(MCOperand::createImm(encoding));
1622  }
1623 
1624  template <typename T>
1625  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1626  assert(N == 1 && "Invalid number of operands!");
1627  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1628  typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1629  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1630  Inst.addOperand(MCOperand::createImm(encoding));
1631  }
1632 
1633  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1634  assert(N == 1 && "Invalid number of operands!");
1635  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1636  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1637  Inst.addOperand(MCOperand::createImm(encoding));
1638  }
1639 
1640  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1641  // Branch operands don't encode the low bits, so shift them off
1642  // here. If it's a label, however, just put it on directly as there's
1643  // not enough information now to do anything.
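 // For example, a resolved byte offset of 8 is encoded as the word
 // offset 2.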
1644  assert(N == 1 && "Invalid number of operands!");
1645  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1646  if (!MCE) {
1647  addExpr(Inst, getImm());
1648  return;
1649  }
1650  assert(MCE && "Invalid constant immediate operand!");
1651  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1652  }
1653 
1654  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1655  // Branch operands don't encode the low bits, so shift them off
1656  // here. If it's a label, however, just put it on directly as there's
1657  // not enough information now to do anything.
1658  assert(N == 1 && "Invalid number of operands!");
1659  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1660  if (!MCE) {
1661  addExpr(Inst, getImm());
1662  return;
1663  }
1664  assert(MCE && "Invalid constant immediate operand!");
1665  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1666  }
1667 
1668  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1669  // Branch operands don't encode the low bits, so shift them off
1670  // here. If it's a label, however, just put it on directly as there's
1671  // not enough information now to do anything.
1672  assert(N == 1 && "Invalid number of operands!");
1673  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1674  if (!MCE) {
1675  addExpr(Inst, getImm());
1676  return;
1677  }
1678  assert(MCE && "Invalid constant immediate operand!");
1679  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1680  }
1681 
1682  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1683  assert(N == 1 && "Invalid number of operands!");
1684  Inst.addOperand(MCOperand::createImm(
1685  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1686  }
1687 
1688  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1689  assert(N == 1 && "Invalid number of operands!");
1690  Inst.addOperand(MCOperand::createImm(getBarrier()));
1691  }
1692 
1693  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1694  assert(N == 1 && "Invalid number of operands!");
1695 
1696  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1697  }
1698 
1699  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1700  assert(N == 1 && "Invalid number of operands!");
1701 
1702  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1703  }
1704 
1705  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1706  assert(N == 1 && "Invalid number of operands!");
1707 
1708  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1709  }
1710 
1711  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1712  assert(N == 1 && "Invalid number of operands!");
1713 
1714  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1715  }
1716 
1717  void addSysCROperands(MCInst &Inst, unsigned N) const {
1718  assert(N == 1 && "Invalid number of operands!");
1719  Inst.addOperand(MCOperand::createImm(getSysCR()));
1720  }
1721 
1722  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1723  assert(N == 1 && "Invalid number of operands!");
1724  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1725  }
1726 
1727  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1728  assert(N == 1 && "Invalid number of operands!");
1729  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1730  }
1731 
1732  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1733  assert(N == 1 && "Invalid number of operands!");
1734  Inst.addOperand(MCOperand::createImm(getBTIHint()));
1735  }
1736 
1737  void addShifterOperands(MCInst &Inst, unsigned N) const {
1738  assert(N == 1 && "Invalid number of operands!");
1739  unsigned Imm =
1740  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1741  Inst.addOperand(MCOperand::createImm(Imm));
1742  }
1743 
1744  void addExtendOperands(MCInst &Inst, unsigned N) const {
1745  assert(N == 1 && "Invalid number of operands!");
1746  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1747  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1748  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1749  Inst.addOperand(MCOperand::createImm(Imm));
1750  }
1751 
1752  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1753  assert(N == 1 && "Invalid number of operands!");
1754  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1755  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1756  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1757  Inst.addOperand(MCOperand::createImm(Imm));
1758  }
1759 
1760  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1761  assert(N == 2 && "Invalid number of operands!");
1762  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1763  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1764  Inst.addOperand(MCOperand::createImm(IsSigned));
1765  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1766  }
1767 
1768  // For 8-bit load/store instructions with a register offset, both the
1769  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1770  // they're disambiguated by whether the shift was explicit or implicit rather
1771  // than its size.
1772  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1773  assert(N == 2 && "Invalid number of operands!");
1774  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1775  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1776  Inst.addOperand(MCOperand::createImm(IsSigned));
1777  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1778  }
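 // For example, both forms below carry a shift amount of 0; only the second,
 // explicit form sets the "DoShift" operand:
 //   ldrb w0, [x1, x2]          -> {IsSigned = 0, DoShift = 0}
 //   ldrb w0, [x1, x2, lsl #0]  -> {IsSigned = 0, DoShift = 1}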
1779 
1780  template<int Shift>
1781  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1782  assert(N == 1 && "Invalid number of operands!");
1783 
1784  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1785  uint64_t Value = CE->getValue();
1786  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1787  }
1788 
1789  template<int Shift>
1790  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1791  assert(N == 1 && "Invalid number of operands!");
1792 
1793  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1794  uint64_t Value = CE->getValue();
1795  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1796  }
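 // A worked example of the masking above, taking 64-bit "mov" aliases:
 //   mov x0, #0x12340000  matches MOVZ with Shift == 16, emitting
 //     (0x12340000 >> 16) & 0xffff == 0x1234, i.e. "movz x0, #0x1234, lsl #16"
 //   mov x0, #-2          matches MOVN with Shift == 0, emitting
 //     (~0xfffffffffffffffe >> 0) & 0xffff == 0x0001, i.e. "movn x0, #1"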
1797 
1798  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1799  assert(N == 1 && "Invalid number of operands!");
1800  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1801  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1802  }
1803 
1804  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1805  assert(N == 1 && "Invalid number of operands!");
1806  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1807  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1808  }
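 // The rotation encodings above follow the architecture: FCMLA-style even
 // rotations {0, 90, 180, 270} encode as rot/90 -> {0..3}, and FCADD-style
 // odd rotations {90, 270} encode as (rot-90)/180 -> {0, 1}. For example,
 // "fcadd z0.s, p0/m, z0.s, z1.s, #270" yields (270 - 90) / 180 == 1.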
1809 
1810  void print(raw_ostream &OS) const override;
1811 
1812  static std::unique_ptr<AArch64Operand>
1813  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1814  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1815  Op->Tok.Data = Str.data();
1816  Op->Tok.Length = Str.size();
1817  Op->Tok.IsSuffix = IsSuffix;
1818  Op->StartLoc = S;
1819  Op->EndLoc = S;
1820  return Op;
1821  }
1822 
1823  static std::unique_ptr<AArch64Operand>
1824  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1825  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1826  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1827  unsigned ShiftAmount = 0,
1828  unsigned HasExplicitAmount = false) {
1829  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1830  Op->Reg.RegNum = RegNum;
1831  Op->Reg.Kind = Kind;
1832  Op->Reg.ElementWidth = 0;
1833  Op->Reg.EqualityTy = EqTy;
1834  Op->Reg.ShiftExtend.Type = ExtTy;
1835  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1836  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1837  Op->StartLoc = S;
1838  Op->EndLoc = E;
1839  return Op;
1840  }
1841 
1842  static std::unique_ptr<AArch64Operand>
1843  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1844  SMLoc S, SMLoc E, MCContext &Ctx,
1845  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1846  unsigned ShiftAmount = 0,
1847  unsigned HasExplicitAmount = false) {
1848  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1849  Kind == RegKind::SVEPredicateVector) &&
1850  "Invalid vector kind");
1851  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1852  HasExplicitAmount);
1853  Op->Reg.ElementWidth = ElementWidth;
1854  return Op;
1855  }
1856 
1857  static std::unique_ptr<AArch64Operand>
1858  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1859  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1860  MCContext &Ctx) {
1861  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1862  Op->VectorList.RegNum = RegNum;
1863  Op->VectorList.Count = Count;
1864  Op->VectorList.NumElements = NumElements;
1865  Op->VectorList.ElementWidth = ElementWidth;
1866  Op->VectorList.RegisterKind = RegisterKind;
1867  Op->StartLoc = S;
1868  Op->EndLoc = E;
1869  return Op;
1870  }
1871 
1872  static std::unique_ptr<AArch64Operand>
1873  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1874  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1875  Op->VectorIndex.Val = Idx;
1876  Op->StartLoc = S;
1877  Op->EndLoc = E;
1878  return Op;
1879  }
1880 
1881  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1882  SMLoc E, MCContext &Ctx) {
1883  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1884  Op->Imm.Val = Val;
1885  Op->StartLoc = S;
1886  Op->EndLoc = E;
1887  return Op;
1888  }
1889 
1890  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1891  unsigned ShiftAmount,
1892  SMLoc S, SMLoc E,
1893  MCContext &Ctx) {
1894  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1895  Op->ShiftedImm.Val = Val;
1896  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1897  Op->StartLoc = S;
1898  Op->EndLoc = E;
1899  return Op;
1900  }
1901 
1902  static std::unique_ptr<AArch64Operand>
1903  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1904  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1905  Op->CondCode.Code = Code;
1906  Op->StartLoc = S;
1907  Op->EndLoc = E;
1908  return Op;
1909  }
1910 
1911  static std::unique_ptr<AArch64Operand>
1912  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1913  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1914  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1915  Op->FPImm.IsExact = IsExact;
1916  Op->StartLoc = S;
1917  Op->EndLoc = S;
1918  return Op;
1919  }
1920 
1921  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1922  StringRef Str,
1923  SMLoc S,
1924  MCContext &Ctx) {
1925  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1926  Op->Barrier.Val = Val;
1927  Op->Barrier.Data = Str.data();
1928  Op->Barrier.Length = Str.size();
1929  Op->StartLoc = S;
1930  Op->EndLoc = S;
1931  return Op;
1932  }
1933 
1934  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1935  uint32_t MRSReg,
1936  uint32_t MSRReg,
1937  uint32_t PStateField,
1938  MCContext &Ctx) {
1939  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1940  Op->SysReg.Data = Str.data();
1941  Op->SysReg.Length = Str.size();
1942  Op->SysReg.MRSReg = MRSReg;
1943  Op->SysReg.MSRReg = MSRReg;
1944  Op->SysReg.PStateField = PStateField;
1945  Op->StartLoc = S;
1946  Op->EndLoc = S;
1947  return Op;
1948  }
1949 
1950  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1951  SMLoc E, MCContext &Ctx) {
1952  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1953  Op->SysCRImm.Val = Val;
1954  Op->StartLoc = S;
1955  Op->EndLoc = E;
1956  return Op;
1957  }
1958 
1959  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1960  StringRef Str,
1961  SMLoc S,
1962  MCContext &Ctx) {
1963  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1964  Op->Prefetch.Val = Val;
1965  Op->Prefetch.Data = Str.data();
1966  Op->Prefetch.Length = Str.size();
1967  Op->StartLoc = S;
1968  Op->EndLoc = S;
1969  return Op;
1970  }
1971 
1972  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1973  StringRef Str,
1974  SMLoc S,
1975  MCContext &Ctx) {
1976  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1977  Op->PSBHint.Val = Val;
1978  Op->PSBHint.Data = Str.data();
1979  Op->PSBHint.Length = Str.size();
1980  Op->StartLoc = S;
1981  Op->EndLoc = S;
1982  return Op;
1983  }
1984 
1985  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1986  StringRef Str,
1987  SMLoc S,
1988  MCContext &Ctx) {
1989  auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1990  Op->BTIHint.Val = Val << 1 | 32;
1991  Op->BTIHint.Data = Str.data();
1992  Op->BTIHint.Length = Str.size();
1993  Op->StartLoc = S;
1994  Op->EndLoc = S;
1995  return Op;
1996  }
1997 
1998  static std::unique_ptr<AArch64Operand>
1999  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2000  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2001  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2002  Op->ShiftExtend.Type = ShOp;
2003  Op->ShiftExtend.Amount = Val;
2004  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2005  Op->StartLoc = S;
2006  Op->EndLoc = E;
2007  return Op;
2008  }
2009 };
2010 
2011 } // end anonymous namespace.
2012 
2013 void AArch64Operand::print(raw_ostream &OS) const {
2014  switch (Kind) {
2015  case k_FPImm:
2016  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2017  if (!getFPImmIsExact())
2018  OS << " (inexact)";
2019  OS << ">";
2020  break;
2021  case k_Barrier: {
2022  StringRef Name = getBarrierName();
2023  if (!Name.empty())
2024  OS << "<barrier " << Name << ">";
2025  else
2026  OS << "<barrier invalid #" << getBarrier() << ">";
2027  break;
2028  }
2029  case k_Immediate:
2030  OS << *getImm();
2031  break;
2032  case k_ShiftedImm: {
2033  unsigned Shift = getShiftedImmShift();
2034  OS << "<shiftedimm ";
2035  OS << *getShiftedImmVal();
2036  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2037  break;
2038  }
2039  case k_CondCode:
2040  OS << "<condcode " << getCondCode() << ">";
2041  break;
2042  case k_VectorList: {
2043  OS << "<vectorlist ";
2044  unsigned Reg = getVectorListStart();
2045  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2046  OS << Reg + i << " ";
2047  OS << ">";
2048  break;
2049  }
2050  case k_VectorIndex:
2051  OS << "<vectorindex " << getVectorIndex() << ">";
2052  break;
2053  case k_SysReg:
2054  OS << "<sysreg: " << getSysReg() << '>';
2055  break;
2056  case k_Token:
2057  OS << "'" << getToken() << "'";
2058  break;
2059  case k_SysCR:
2060  OS << "c" << getSysCR();
2061  break;
2062  case k_Prefetch: {
2063  StringRef Name = getPrefetchName();
2064  if (!Name.empty())
2065  OS << "<prfop " << Name << ">";
2066  else
2067  OS << "<prfop invalid #" << getPrefetch() << ">";
2068  break;
2069  }
2070  case k_PSBHint:
2071  OS << getPSBHintName();
2072  break;
2073  case k_BTIHint:
2074  OS << getBTIHintName();
2075  break;
2076  case k_Register:
2077  OS << "<register " << getReg() << ">";
2078  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2079  break;
2080  LLVM_FALLTHROUGH;
2081  case k_ShiftExtend:
2082  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2083  << getShiftExtendAmount();
2084  if (!hasShiftExtendAmount())
2085  OS << "<imp>";
2086  OS << '>';
2087  break;
2088  }
2089 }
2090 
2091 /// @name Auto-generated Match Functions
2092 /// {
2093 
2094 static unsigned MatchRegisterName(StringRef Name);
2095 
2096 /// }
2097 
2098 static unsigned MatchNeonVectorRegName(StringRef Name) {
2099  return StringSwitch<unsigned>(Name.lower())
2100  .Case("v0", AArch64::Q0)
2101  .Case("v1", AArch64::Q1)
2102  .Case("v2", AArch64::Q2)
2103  .Case("v3", AArch64::Q3)
2104  .Case("v4", AArch64::Q4)
2105  .Case("v5", AArch64::Q5)
2106  .Case("v6", AArch64::Q6)
2107  .Case("v7", AArch64::Q7)
2108  .Case("v8", AArch64::Q8)
2109  .Case("v9", AArch64::Q9)
2110  .Case("v10", AArch64::Q10)
2111  .Case("v11", AArch64::Q11)
2112  .Case("v12", AArch64::Q12)
2113  .Case("v13", AArch64::Q13)
2114  .Case("v14", AArch64::Q14)
2115  .Case("v15", AArch64::Q15)
2116  .Case("v16", AArch64::Q16)
2117  .Case("v17", AArch64::Q17)
2118  .Case("v18", AArch64::Q18)
2119  .Case("v19", AArch64::Q19)
2120  .Case("v20", AArch64::Q20)
2121  .Case("v21", AArch64::Q21)
2122  .Case("v22", AArch64::Q22)
2123  .Case("v23", AArch64::Q23)
2124  .Case("v24", AArch64::Q24)
2125  .Case("v25", AArch64::Q25)
2126  .Case("v26", AArch64::Q26)
2127  .Case("v27", AArch64::Q27)
2128  .Case("v28", AArch64::Q28)
2129  .Case("v29", AArch64::Q29)
2130  .Case("v30", AArch64::Q30)
2131  .Case("v31", AArch64::Q31)
2132  .Default(0);
2133 }
2134 
2135 /// Returns an optional pair of (#elements, element-width) if Suffix
2136 /// is a valid vector kind. Where the number of elements in a vector
2137 /// or the vector width is implicit or explicitly unknown (but still a
2138 /// valid suffix kind), 0 is used.
2139 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2140  RegKind VectorKind) {
2141  std::pair<int, int> Res = {-1, -1};
2142 
2143  switch (VectorKind) {
2144  case RegKind::NeonVector:
2145  Res =
2146  StringSwitch<std::pair<int, int>>(Suffix.lower())
2147  .Case("", {0, 0})
2148  .Case(".1d", {1, 64})
2149  .Case(".1q", {1, 128})
2150  // '.2h' needed for fp16 scalar pairwise reductions
2151  .Case(".2h", {2, 16})
2152  .Case(".2s", {2, 32})
2153  .Case(".2d", {2, 64})
2154  // '.4b' is another special case for the ARMv8.2a dot product
2155  // operand
2156  .Case(".4b", {4, 8})
2157  .Case(".4h", {4, 16})
2158  .Case(".4s", {4, 32})
2159  .Case(".8b", {8, 8})
2160  .Case(".8h", {8, 16})
2161  .Case(".16b", {16, 8})
2162  // Accept the width neutral ones, too, for verbose syntax. If those
2163  // aren't used in the right places, the token operand won't match so
2164  // all will work out.
2165  .Case(".b", {0, 8})
2166  .Case(".h", {0, 16})
2167  .Case(".s", {0, 32})
2168  .Case(".d", {0, 64})
2169  .Default({-1, -1});
2170  break;
2171  case RegKind::SVEPredicateVector:
2172  case RegKind::SVEDataVector:
2173  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2174  .Case("", {0, 0})
2175  .Case(".b", {0, 8})
2176  .Case(".h", {0, 16})
2177  .Case(".s", {0, 32})
2178  .Case(".d", {0, 64})
2179  .Case(".q", {0, 128})
2180  .Default({-1, -1});
2181  break;
2182  default:
2183  llvm_unreachable("Unsupported RegKind");
2184  }
2185 
2186  if (Res == std::make_pair(-1, -1))
2187  return Optional<std::pair<int, int>>();
2188 
2189  return Optional<std::pair<int, int>>(Res);
2190 }
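 // Sample results for the tables above:
 //   parseVectorKind(".4s", RegKind::NeonVector)    -> {4, 32}
 //   parseVectorKind(".b",  RegKind::NeonVector)    -> {0, 8}   (width-neutral)
 //   parseVectorKind(".q",  RegKind::SVEDataVector) -> {0, 128}
 //   parseVectorKind(".3s", RegKind::NeonVector)    -> None (invalid suffix)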
2191 
2192 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2193  return parseVectorKind(Suffix, VectorKind).hasValue();
2194 }
2195 
2196 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2197  return StringSwitch<unsigned>(Name.lower())
2198  .Case("z0", AArch64::Z0)
2199  .Case("z1", AArch64::Z1)
2200  .Case("z2", AArch64::Z2)
2201  .Case("z3", AArch64::Z3)
2202  .Case("z4", AArch64::Z4)
2203  .Case("z5", AArch64::Z5)
2204  .Case("z6", AArch64::Z6)
2205  .Case("z7", AArch64::Z7)
2206  .Case("z8", AArch64::Z8)
2207  .Case("z9", AArch64::Z9)
2208  .Case("z10", AArch64::Z10)
2209  .Case("z11", AArch64::Z11)
2210  .Case("z12", AArch64::Z12)
2211  .Case("z13", AArch64::Z13)
2212  .Case("z14", AArch64::Z14)
2213  .Case("z15", AArch64::Z15)
2214  .Case("z16", AArch64::Z16)
2215  .Case("z17", AArch64::Z17)
2216  .Case("z18", AArch64::Z18)
2217  .Case("z19", AArch64::Z19)
2218  .Case("z20", AArch64::Z20)
2219  .Case("z21", AArch64::Z21)
2220  .Case("z22", AArch64::Z22)
2221  .Case("z23", AArch64::Z23)
2222  .Case("z24", AArch64::Z24)
2223  .Case("z25", AArch64::Z25)
2224  .Case("z26", AArch64::Z26)
2225  .Case("z27", AArch64::Z27)
2226  .Case("z28", AArch64::Z28)
2227  .Case("z29", AArch64::Z29)
2228  .Case("z30", AArch64::Z30)
2229  .Case("z31", AArch64::Z31)
2230  .Default(0);
2231 }
2232 
2233 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2234  return StringSwitch<unsigned>(Name.lower())
2235  .Case("p0", AArch64::P0)
2236  .Case("p1", AArch64::P1)
2237  .Case("p2", AArch64::P2)
2238  .Case("p3", AArch64::P3)
2239  .Case("p4", AArch64::P4)
2240  .Case("p5", AArch64::P5)
2241  .Case("p6", AArch64::P6)
2242  .Case("p7", AArch64::P7)
2243  .Case("p8", AArch64::P8)
2244  .Case("p9", AArch64::P9)
2245  .Case("p10", AArch64::P10)
2246  .Case("p11", AArch64::P11)
2247  .Case("p12", AArch64::P12)
2248  .Case("p13", AArch64::P13)
2249  .Case("p14", AArch64::P14)
2250  .Case("p15", AArch64::P15)
2251  .Default(0);
2252 }
2253 
2254 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2255  SMLoc &EndLoc) {
2256  StartLoc = getLoc();
2257  auto Res = tryParseScalarRegister(RegNo);
2258  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2259  return Res != MatchOperand_Success;
2260 }
2261 
2262 // Matches a register name or register alias previously defined by '.req'
2263 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2264  RegKind Kind) {
2265  unsigned RegNum = 0;
2266  if ((RegNum = matchSVEDataVectorRegName(Name)))
2267  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2268 
2269  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2270  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2271 
2272  if ((RegNum = MatchNeonVectorRegName(Name)))
2273  return Kind == RegKind::NeonVector ? RegNum : 0;
2274 
2275  // The parsed register must be of RegKind Scalar
2276  if ((RegNum = MatchRegisterName(Name)))
2277  return Kind == RegKind::Scalar ? RegNum : 0;
2278 
2279  if (!RegNum) {
2280  // Handle a few common aliases of registers.
2281  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2282  .Case("fp", AArch64::FP)
2283  .Case("lr", AArch64::LR)
2284  .Case("x31", AArch64::XZR)
2285  .Case("w31", AArch64::WZR)
2286  .Default(0))
2287  return Kind == RegKind::Scalar ? RegNum : 0;
2288 
2289  // Check for aliases registered via .req. Canonicalize to lower case.
2290  // That's more consistent since register names are case insensitive, and
2291  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2292  auto Entry = RegisterReqs.find(Name.lower());
2293  if (Entry == RegisterReqs.end())
2294  return 0;
2295 
2296  // Set RegNum if the match is the right kind of register.
2297  if (Kind == Entry->getValue().first)
2298  RegNum = Entry->getValue().second;
2299  }
2300  return RegNum;
2301 }
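 // For example:
 //   matchRegisterNameAlias("lr",  RegKind::Scalar)     -> AArch64::LR
 //   matchRegisterNameAlias("x31", RegKind::Scalar)     -> AArch64::XZR
 //   matchRegisterNameAlias("v7",  RegKind::NeonVector) -> AArch64::Q7
 // A name bound with ".req" resolves only when its recorded kind matches the
 // requested kind; otherwise 0 is returned.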
2302 
2303 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2304 /// Identifier when called, and if it is a register name the token is eaten
2305 /// and the matched register number is returned via \p RegNum.
2306 OperandMatchResultTy
2307 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2308  MCAsmParser &Parser = getParser();
2309  const AsmToken &Tok = Parser.getTok();
2310  if (Tok.isNot(AsmToken::Identifier))
2311  return MatchOperand_NoMatch;
2312 
2313  std::string lowerCase = Tok.getString().lower();
2314  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2315  if (Reg == 0)
2316  return MatchOperand_NoMatch;
2317 
2318  RegNum = Reg;
2319  Parser.Lex(); // Eat identifier token.
2320  return MatchOperand_Success;
2321 }
2322 
2323 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2324 OperandMatchResultTy
2325 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2326  MCAsmParser &Parser = getParser();
2327  SMLoc S = getLoc();
2328 
2329  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2330  Error(S, "Expected cN operand where 0 <= N <= 15");
2331  return MatchOperand_ParseFail;
2332  }
2333 
2334  StringRef Tok = Parser.getTok().getIdentifier();
2335  if (Tok[0] != 'c' && Tok[0] != 'C') {
2336  Error(S, "Expected cN operand where 0 <= N <= 15");
2337  return MatchOperand_ParseFail;
2338  }
2339 
2340  uint32_t CRNum;
2341  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2342  if (BadNum || CRNum > 15) {
2343  Error(S, "Expected cN operand where 0 <= N <= 15");
2344  return MatchOperand_ParseFail;
2345  }
2346 
2347  Parser.Lex(); // Eat identifier token.
2348  Operands.push_back(
2349  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2350  return MatchOperand_Success;
2351 }
2352 
2353 /// tryParsePrefetch - Try to parse a prefetch operand.
2354 template <bool IsSVEPrefetch>
2355 OperandMatchResultTy
2356 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2357  MCAsmParser &Parser = getParser();
2358  SMLoc S = getLoc();
2359  const AsmToken &Tok = Parser.getTok();
2360 
2361  auto LookupByName = [](StringRef N) {
2362  if (IsSVEPrefetch) {
2363  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2364  return Optional<unsigned>(Res->Encoding);
2365  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2366  return Optional<unsigned>(Res->Encoding);
2367  return Optional<unsigned>();
2368  };
2369 
2370  auto LookupByEncoding = [](unsigned E) {
2371  if (IsSVEPrefetch) {
2372  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2373  return Optional<StringRef>(Res->Name);
2374  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2375  return Optional<StringRef>(Res->Name);
2376  return Optional<StringRef>();
2377  };
2378  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2379 
2380  // Either an identifier for named values or a 5-bit immediate.
2381  // Eat optional hash.
2382  if (parseOptionalToken(AsmToken::Hash) ||
2383  Tok.is(AsmToken::Integer)) {
2384  const MCExpr *ImmVal;
2385  if (getParser().parseExpression(ImmVal))
2386  return MatchOperand_ParseFail;
2387 
2388  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2389  if (!MCE) {
2390  TokError("immediate value expected for prefetch operand");
2391  return MatchOperand_ParseFail;
2392  }
2393  unsigned prfop = MCE->getValue();
2394  if (prfop > MaxVal) {
2395  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2396  "] expected");
2397  return MatchOperand_ParseFail;
2398  }
2399 
2400  auto PRFM = LookupByEncoding(MCE->getValue());
2401  Operands.push_back(AArch64Operand::CreatePrefetch(
2402  prfop, PRFM.getValueOr(""), S, getContext()));
2403  return MatchOperand_Success;
2404  }
2405 
2406  if (Tok.isNot(AsmToken::Identifier)) {
2407  TokError("prefetch hint expected");
2408  return MatchOperand_ParseFail;
2409  }
2410 
2411  auto PRFM = LookupByName(Tok.getString());
2412  if (!PRFM) {
2413  TokError("prefetch hint expected");
2414  return MatchOperand_ParseFail;
2415  }
2416 
2417  Parser.Lex(); // Eat identifier token.
2418  Operands.push_back(AArch64Operand::CreatePrefetch(
2419  *PRFM, Tok.getString(), S, getContext()));
2420  return MatchOperand_Success;
2421 }
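 // For example, these two spellings produce the same prefetch operand, since
 // PLDL1KEEP has encoding 0:
 //   prfm pldl1keep, [x0]
 //   prfm #0, [x0]
 // Immediates above the limit (31, or 15 for SVE) draw the range diagnostic.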
2422 
2423 /// tryParsePSBHint - Try to parse a PSB operand, mapped to the HINT instruction.
2424 OperandMatchResultTy
2425 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2426  MCAsmParser &Parser = getParser();
2427  SMLoc S = getLoc();
2428  const AsmToken &Tok = Parser.getTok();
2429  if (Tok.isNot(AsmToken::Identifier)) {
2430  TokError("invalid operand for instruction");
2431  return MatchOperand_ParseFail;
2432  }
2433 
2434  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2435  if (!PSB) {
2436  TokError("invalid operand for instruction");
2437  return MatchOperand_ParseFail;
2438  }
2439 
2440  Parser.Lex(); // Eat identifier token.
2441  Operands.push_back(AArch64Operand::CreatePSBHint(
2442  PSB->Encoding, Tok.getString(), S, getContext()));
2443  return MatchOperand_Success;
2444 }
2445 
2446 /// tryParseBTIHint - Try to parse a BTI operand, mapped to the HINT instruction.
2447 OperandMatchResultTy
2448 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2449  MCAsmParser &Parser = getParser();
2450  SMLoc S = getLoc();
2451  const AsmToken &Tok = Parser.getTok();
2452  if (Tok.isNot(AsmToken::Identifier)) {
2453  TokError("invalid operand for instruction");
2454  return MatchOperand_ParseFail;
2455  }
2456 
2457  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2458  if (!BTI) {
2459  TokError("invalid operand for instruction");
2460  return MatchOperand_ParseFail;
2461  }
2462 
2463  Parser.Lex(); // Eat identifier token.
2464  Operands.push_back(AArch64Operand::CreateBTIHint(
2465  BTI->Encoding, Tok.getString(), S, getContext()));
2466  return MatchOperand_Success;
2467 }
2468 
2469 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2470 /// instruction.
2471 OperandMatchResultTy
2472 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2473  MCAsmParser &Parser = getParser();
2474  SMLoc S = getLoc();
2475  const MCExpr *Expr;
2476 
2477  if (Parser.getTok().is(AsmToken::Hash)) {
2478  Parser.Lex(); // Eat hash token.
2479  }
2480 
2481  if (parseSymbolicImmVal(Expr))
2482  return MatchOperand_ParseFail;
2483 
2484  AArch64MCExpr::VariantKind ELFRefKind;
2485  MCSymbolRefExpr::VariantKind DarwinRefKind;
2486  int64_t Addend;
2487  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2488  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2489  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2490  // No modifier was specified at all; this is the syntax for an ELF basic
2491  // ADRP relocation (unfortunately).
2492  Expr =
2493  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2494  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2495  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2496  Addend != 0) {
2497  Error(S, "gotpage label reference not allowed an addend");
2498  return MatchOperand_ParseFail;
2499  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2500  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2501  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2502  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2503  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2504  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2505  // The operand must be an @page or @gotpage qualified symbolref.
2506  Error(S, "page or gotpage label reference expected");
2507  return MatchOperand_ParseFail;
2508  }
2509  }
2510 
2511  // We have either a label reference (possibly with an addend) or an
2512  // immediate. The addend is a raw value here; the linker will adjust it to
2513  // reference only the page.
2514  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2515  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2516 
2517  return MatchOperand_Success;
2518 }
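 // Accepted forms, for example:
 //   adrp x0, var         ; bare symbol, wrapped as VK_ABS_PAGE above
 //   adrp x0, :got:var    ; GOT page reference
 //   adrp x0, var + 16    ; addend, reduced to the page by the linker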
2519 
2520 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2521 /// instruction.
2522 OperandMatchResultTy
2523 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2524  SMLoc S = getLoc();
2525  const MCExpr *Expr;
2526 
2527  // Leave anything with a bracket to the default for SVE
2528  if (getParser().getTok().is(AsmToken::LBrac))
2529  return MatchOperand_NoMatch;
2530 
2531  if (getParser().getTok().is(AsmToken::Hash))
2532  getParser().Lex(); // Eat hash token.
2533 
2534  if (parseSymbolicImmVal(Expr))
2535  return MatchOperand_ParseFail;
2536 
2537  AArch64MCExpr::VariantKind ELFRefKind;
2538  MCSymbolRefExpr::VariantKind DarwinRefKind;
2539  int64_t Addend;
2540  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2541  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2542  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2543  // No modifier was specified at all; this is the syntax for an ELF basic
2544  // ADR relocation (unfortunately).
2545  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2546  } else {
2547  Error(S, "unexpected adr label");
2548  return MatchOperand_ParseFail;
2549  }
2550  }
2551 
2552  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2553  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2554  return MatchOperand_Success;
2555 }
2556 
2557 /// tryParseFPImm - A floating point immediate expression operand.
2558 template<bool AddFPZeroAsLiteral>
2559 OperandMatchResultTy
2560 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2561  MCAsmParser &Parser = getParser();
2562  SMLoc S = getLoc();
2563 
2564  bool Hash = parseOptionalToken(AsmToken::Hash);
2565 
2566  // Handle negation, as that still comes through as a separate token.
2567  bool isNegative = parseOptionalToken(AsmToken::Minus);
2568 
2569  const AsmToken &Tok = Parser.getTok();
2570  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2571  if (!Hash)
2572  return MatchOperand_NoMatch;
2573  TokError("invalid floating point immediate");
2574  return MatchOperand_ParseFail;
2575  }
2576 
2577  // Parse hexadecimal representation.
2578  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2579  if (Tok.getIntVal() > 255 || isNegative) {
2580  TokError("encoded floating point value out of range");
2581  return MatchOperand_ParseFail;
2582  }
2583 
2584  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2585  Operands.push_back(
2586  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2587  } else {
2588  // Parse FP representation.
2589  APFloat RealVal(APFloat::IEEEdouble());
2590  auto Status =
2591  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2592  if (isNegative)
2593  RealVal.changeSign();
2594 
2595  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2596  Operands.push_back(
2597  AArch64Operand::CreateToken("#0", false, S, getContext()));
2598  Operands.push_back(
2599  AArch64Operand::CreateToken(".0", false, S, getContext()));
2600  } else
2601  Operands.push_back(AArch64Operand::CreateFPImm(
2602  RealVal, Status == APFloat::opOK, S, getContext()));
2603  }
2604 
2605  Parser.Lex(); // Eat the token.
2606 
2607  return MatchOperand_Success;
2608 }
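 // For example, the following parse to the same immediate, since the 8-bit
 // encoding 0x70 decodes to 1.0:
 //   fmov d0, #1.0
 //   fmov d0, #0x70
 // The "#0"/".0" token split above applies only when AddFPZeroAsLiteral is
 // set, for instructions that accept a literal #0.0 form.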
2609 
2610 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2611 /// a shift suffix, for example '#1, lsl #12'.
2612 OperandMatchResultTy
2613 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2614  MCAsmParser &Parser = getParser();
2615  SMLoc S = getLoc();
2616 
2617  if (Parser.getTok().is(AsmToken::Hash))
2618  Parser.Lex(); // Eat '#'
2619  else if (Parser.getTok().isNot(AsmToken::Integer))
2620  // Operand should start from # or should be integer, emit error otherwise.
2621  return MatchOperand_NoMatch;
2622 
2623  const MCExpr *Imm;
2624  if (parseSymbolicImmVal(Imm))
2625  return MatchOperand_ParseFail;
2626  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2627  SMLoc E = Parser.getTok().getLoc();
2628  Operands.push_back(
2629  AArch64Operand::CreateImm(Imm, S, E, getContext()));
2630  return MatchOperand_Success;
2631  }
2632 
2633  // Eat ','
2634  Parser.Lex();
2635 
2636  // The optional operand must be "lsl #N" where N is non-negative.
2637  if (!Parser.getTok().is(AsmToken::Identifier) ||
2638  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2639  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2640  return MatchOperand_ParseFail;
2641  }
2642 
2643  // Eat 'lsl'
2644  Parser.Lex();
2645 
2646  parseOptionalToken(AsmToken::Hash);
2647 
2648  if (Parser.getTok().isNot(AsmToken::Integer)) {
2649  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2650  return MatchOperand_ParseFail;
2651  }
2652 
2653  int64_t ShiftAmount = Parser.getTok().getIntVal();
2654 
2655  if (ShiftAmount < 0) {
2656  Error(Parser.getTok().getLoc(), "positive shift amount required");
2657  return MatchOperand_ParseFail;
2658  }
2659  Parser.Lex(); // Eat the number
2660 
2661  // Just in case the optional lsl #0 is used for immediates other than zero.
2662  if (ShiftAmount == 0 && Imm != 0) {
2663  SMLoc E = Parser.getTok().getLoc();
2664  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2665  return MatchOperand_Success;
2666  }
2667 
2668  SMLoc E = Parser.getTok().getLoc();
2669  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2670  S, E, getContext()));
2671  return MatchOperand_Success;
2672 }
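 // For example:
 //   add x0, x1, #1, lsl #12  -> shifted-immediate operand (1, shift 12)
 //   add x0, x1, #4, lsl #0   -> plain immediate operand #4, via the
 //                               "lsl #0" special case above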
2673 
2674 /// parseCondCodeString - Parse a Condition Code string.
2675 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2676  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2677  .Case("eq", AArch64CC::EQ)
2678  .Case("ne", AArch64CC::NE)
2679  .Case("cs", AArch64CC::HS)
2680  .Case("hs", AArch64CC::HS)
2681  .Case("cc", AArch64CC::LO)
2682  .Case("lo", AArch64CC::LO)
2683  .Case("mi", AArch64CC::MI)
2684  .Case("pl", AArch64CC::PL)
2685  .Case("vs", AArch64CC::VS)
2686  .Case("vc", AArch64CC::VC)
2687  .Case("hi", AArch64CC::HI)
2688  .Case("ls", AArch64CC::LS)
2689  .Case("ge", AArch64CC::GE)
2690  .Case("lt", AArch64CC::LT)
2691  .Case("gt", AArch64CC::GT)
2692  .Case("le", AArch64CC::LE)
2693  .Case("al", AArch64CC::AL)
2694  .Case("nv", AArch64CC::NV)
2695  .Default(AArch64CC::Invalid);
2696 
2697  if (CC == AArch64CC::Invalid &&
2698  getSTI().getFeatureBits()[AArch64::FeatureSVE])
2699  CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2700  .Case("none", AArch64CC::EQ)
2701  .Case("any", AArch64CC::NE)
2702  .Case("nlast", AArch64CC::HS)
2703  .Case("last", AArch64CC::LO)
2704  .Case("first", AArch64CC::MI)
2705  .Case("nfrst", AArch64CC::PL)
2706  .Case("pmore", AArch64CC::HI)
2707  .Case("plast", AArch64CC::LS)
2708  .Case("tcont", AArch64CC::GE)
2709  .Case("tstop", AArch64CC::LT)
2710  .Default(AArch64CC::Invalid);
2711 
2712  return CC;
2713 }
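 // With +sve the predicate-oriented names alias the base codes, so for
 // example "b.none" assembles identically to "b.eq" and "b.any" to "b.ne".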
2714 
2715 /// parseCondCode - Parse a Condition Code operand.
2716 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2717  bool invertCondCode) {
2718  MCAsmParser &Parser = getParser();
2719  SMLoc S = getLoc();
2720  const AsmToken &Tok = Parser.getTok();
2721  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2722 
2723  StringRef Cond = Tok.getString();
2724  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2725  if (CC == AArch64CC::Invalid)
2726  return TokError("invalid condition code");
2727  Parser.Lex(); // Eat identifier token.
2728 
2729  if (invertCondCode) {
2730  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2731  return TokError("condition codes AL and NV are invalid for this instruction");
2732  CC = AArch64CC::getInvertedCondCode(CC);
2733  }
2734 
2735  Operands.push_back(
2736  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2737  return false;
2738 }
2739 
2740 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2741 /// argument. Parse it if present.
2742 OperandMatchResultTy
2743 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2744  MCAsmParser &Parser = getParser();
2745  const AsmToken &Tok = Parser.getTok();
2746  std::string LowerID = Tok.getString().lower();
2747  AArch64_AM::ShiftExtendType ShOp =
2748  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2749  .Case("lsl", AArch64_AM::LSL)
2750  .Case("lsr", AArch64_AM::LSR)
2751  .Case("asr", AArch64_AM::ASR)
2752  .Case("ror", AArch64_AM::ROR)
2753  .Case("msl", AArch64_AM::MSL)
2754  .Case("uxtb", AArch64_AM::UXTB)
2755  .Case("uxth", AArch64_AM::UXTH)
2756  .Case("uxtw", AArch64_AM::UXTW)
2757  .Case("uxtx", AArch64_AM::UXTX)
2758  .Case("sxtb", AArch64_AM::SXTB)
2759  .Case("sxth", AArch64_AM::SXTH)
2760  .Case("sxtw", AArch64_AM::SXTW)
2761  .Case("sxtx", AArch64_AM::SXTX)
2762  .Default(AArch64_AM::InvalidShiftExtend);
2763 
2764  if (ShOp == AArch64_AM::InvalidShiftExtend)
2765  return MatchOperand_NoMatch;
2766 
2767  SMLoc S = Tok.getLoc();
2768  Parser.Lex();
2769 
2770  bool Hash = parseOptionalToken(AsmToken::Hash);
2771 
2772  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2773  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2774  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2775  ShOp == AArch64_AM::MSL) {
2776  // We expect a number here.
2777  TokError("expected #imm after shift specifier");
2778  return MatchOperand_ParseFail;
2779  }
2780 
2781  // "extend" type operations don't need an immediate, #0 is implicit.
2782  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2783  Operands.push_back(
2784  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2785  return MatchOperand_Success;
2786  }
2787 
2788  // Make sure we do actually have a number, identifier or a parenthesized
2789  // expression.
2790  SMLoc E = Parser.getTok().getLoc();
2791  if (!Parser.getTok().is(AsmToken::Integer) &&
2792  !Parser.getTok().is(AsmToken::LParen) &&
2793  !Parser.getTok().is(AsmToken::Identifier)) {
2794  Error(E, "expected integer shift amount");
2795  return MatchOperand_ParseFail;
2796  }
2797 
2798  const MCExpr *ImmVal;
2799  if (getParser().parseExpression(ImmVal))
2800  return MatchOperand_ParseFail;
2801 
2802  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2803  if (!MCE) {
2804  Error(E, "expected constant '#imm' after shift specifier");
2805  return MatchOperand_ParseFail;
2806  }
2807 
2808  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2809  Operands.push_back(AArch64Operand::CreateShiftExtend(
2810  ShOp, MCE->getValue(), true, S, E, getContext()));
2811  return MatchOperand_Success;
2812 }
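 // For example:
 //   add x0, x1, x2, lsl #3  -> shift with explicit amount 3
 //   add x0, x1, w2, sxtw    -> extend with an implicit amount of 0
 //   add x0, x1, x2, lsl x3  -> rejected: a constant '#imm' is required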
2813 
2814 static const struct Extension {
2815  const char *Name;
2816  const FeatureBitset Features;
2817 } ExtensionMap[] = {
2818  {"crc", {AArch64::FeatureCRC}},
2819  {"sm4", {AArch64::FeatureSM4}},
2820  {"sha3", {AArch64::FeatureSHA3}},
2821  {"sha2", {AArch64::FeatureSHA2}},
2822  {"aes", {AArch64::FeatureAES}},
2823  {"crypto", {AArch64::FeatureCrypto}},
2824  {"fp", {AArch64::FeatureFPARMv8}},
2825  {"simd", {AArch64::FeatureNEON}},
2826  {"ras", {AArch64::FeatureRAS}},
2827  {"lse", {AArch64::FeatureLSE}},
2828  {"predres", {AArch64::FeaturePredRes}},
2829  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
2830  {"mte", {AArch64::FeatureMTE}},
2831  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
2832  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
2833  {"ccpp", {AArch64::FeatureCCPP}},
2834  {"sve", {AArch64::FeatureSVE}},
2835  // FIXME: Unsupported extensions
2836  {"pan", {}},
2837  {"lor", {}},
2838  {"rdma", {}},
2839  {"profile", {}},
2840 };
2841 
2842 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2843  if (FBS[AArch64::HasV8_1aOps])
2844  Str += "ARMv8.1a";
2845  else if (FBS[AArch64::HasV8_2aOps])
2846  Str += "ARMv8.2a";
2847  else if (FBS[AArch64::HasV8_3aOps])
2848  Str += "ARMv8.3a";
2849  else if (FBS[AArch64::HasV8_4aOps])
2850  Str += "ARMv8.4a";
2851  else if (FBS[AArch64::HasV8_5aOps])
2852  Str += "ARMv8.5a";
2853  else {
2854  auto ext = std::find_if(std::begin(ExtensionMap),
2855  std::end(ExtensionMap),
2856  [&](const Extension& e)
2857  // Use & in case multiple features are enabled
2858  { return (FBS & e.Features) != FeatureBitset(); }
2859  );
2860 
2861  Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2862  }
2863 }
2864 
2865 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2866  SMLoc S) {
2867  const uint16_t Op2 = Encoding & 7;
2868  const uint16_t Cm = (Encoding & 0x78) >> 3;
2869  const uint16_t Cn = (Encoding & 0x780) >> 7;
2870  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2871 
2872  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2873 
2874  Operands.push_back(
2875  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2876  Operands.push_back(
2877  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2878  Operands.push_back(
2879  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2880  Expr = MCConstantExpr::create(Op2, getContext());
2881  Operands.push_back(
2882  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2883 }
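 // A minimal sketch of the inverse packing (a hypothetical helper, present
 // only to document the field layout decoded by createSysAlias above):
 static constexpr uint16_t packSysAliasEncoding(uint16_t Op1, uint16_t Cn,
                                                uint16_t Cm, uint16_t Op2) {
   // op1:CRn:CRm:op2 packed into the low 14 bits.
   return (Op1 << 11) | (Cn << 7) | (Cm << 3) | Op2;
 }
 // For example, "dc civac" is SYS #3, C7, C14, #1, and
 // packSysAliasEncoding(3, 7, 14, 1) == 0x1BF1.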
2884 
2885 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2886 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2887 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2888  OperandVector &Operands) {
2889  if (Name.find('.') != StringRef::npos)
2890  return TokError("invalid operand");
2891 
2892  Mnemonic = Name;
2893  Operands.push_back(
2894  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2895 
2896  MCAsmParser &Parser = getParser();
2897  const AsmToken &Tok = Parser.getTok();
2898  StringRef Op = Tok.getString();
2899  SMLoc S = Tok.getLoc();
2900 
2901  if (Mnemonic == "ic") {
2902  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2903  if (!IC)
2904  return TokError("invalid operand for IC instruction");
2905  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2906  std::string Str("IC " + std::string(IC->Name) + " requires ");
2907  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2908  return TokError(Str.c_str());
2909  }
2910  createSysAlias(IC->Encoding, Operands, S);
2911  } else if (Mnemonic == "dc") {
2912  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2913  if (!DC)
2914  return TokError("invalid operand for DC instruction");
2915  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2916  std::string Str("DC " + std::string(DC->Name) + " requires ");
2917  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2918  return TokError(Str.c_str());
2919  }
2920  createSysAlias(DC->Encoding, Operands, S);
2921  } else if (Mnemonic == "at") {
2922  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2923  if (!AT)
2924  return TokError("invalid operand for AT instruction");
2925  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2926  std::string Str("AT " + std::string(AT->Name) + " requires ");
2927  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2928  return TokError(Str.c_str());
2929  }
2930  createSysAlias(AT->Encoding, Operands, S);
2931  } else if (Mnemonic == "tlbi") {
2932  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2933  if (!TLBI)
2934  return TokError("invalid operand for TLBI instruction");
2935  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2936  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2937  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2938  return TokError(Str.c_str());
2939  }
2940  createSysAlias(TLBI->Encoding, Operands, S);
2941  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2942  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2943  if (!PRCTX)
2944  return TokError("invalid operand for prediction restriction instruction");
2945  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2946  std::string Str(
2947  Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2948  setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
2949  return TokError(Str.c_str());
2950  }
2951  uint16_t PRCTX_Op2 =
2952  Mnemonic == "cfp" ? 4 :
2953  Mnemonic == "dvp" ? 5 :
2954  Mnemonic == "cpp" ? 7 :
2955  0;
2956  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
2957  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
2958  }
2959 
2960  Parser.Lex(); // Eat operand.
2961 
2962  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2963  bool HasRegister = false;
2964 
2965  // Check for the optional register operand.
2966  if (parseOptionalToken(AsmToken::Comma)) {
2967  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2968  return TokError("expected register operand");
2969  HasRegister = true;
2970  }
2971 
2972  if (ExpectRegister && !HasRegister)
2973  return TokError("specified " + Mnemonic + " op requires a register");
2974  else if (!ExpectRegister && HasRegister)
2975  return TokError("specified " + Mnemonic + " op does not use a register");
2976 
2977  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2978  return true;
2979 
2980  return false;
2981 }
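 // For example, the aliases expand as follows (register rule per the "all"
 // check above):
 //   ic ialluis      -> sys #0, c7, c1, #0        (no register allowed)
 //   tlbi vae1, x0   -> sys #0, c8, c7, #1, x0    (register required)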
2982 
2983 OperandMatchResultTy
2984 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2985  MCAsmParser &Parser = getParser();
2986  const AsmToken &Tok = Parser.getTok();
2987 
2988  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2989  TokError("'csync' operand expected");
2990  return MatchOperand_ParseFail;
2991  // Can be either a #imm style literal or an option name
2992  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2993  // Immediate operand.
2994  const MCExpr *ImmVal;
2995  SMLoc ExprLoc = getLoc();
2996  if (getParser().parseExpression(ImmVal))
2997  return MatchOperand_ParseFail;
2998  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2999  if (!MCE) {
3000  Error(ExprLoc, "immediate value expected for barrier operand");
3001  return MatchOperand_ParseFail;
3002  }
3003  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
3004  Error(ExprLoc, "barrier operand out of range");
3005  return MatchOperand_ParseFail;
3006  }
3007  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3008  Operands.push_back(AArch64Operand::CreateBarrier(
3009  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3010  return MatchOperand_Success;
3011  }
3012 
3013  if (Tok.isNot(AsmToken::Identifier)) {
3014  TokError("invalid operand for instruction");
3015  return MatchOperand_ParseFail;
3016  }
3017 
3018  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3019  // The only valid named option for ISB is 'sy'
3020  auto DB = AArch64DB::lookupDBByName(Tok.getString());
3021  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3022  TokError("'sy' or #imm operand expected");
3023  return MatchOperand_ParseFail;
3024  // The only valid named option for TSB is 'csync'
3025  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3026  TokError("'csync' operand expected");
3027  return MatchOperand_ParseFail;
3028  } else if (!DB && !TSB) {
3029  TokError("invalid barrier option name");
3030  return MatchOperand_ParseFail;
3031  }
3032 
3033  Operands.push_back(AArch64Operand::CreateBarrier(
3034  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3035  Parser.Lex(); // Consume the option
3036 
3037  return MatchOperand_Success;
3038 }
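 // Named options and immediates are interchangeable for DMB/DSB; for example
 // "dmb ish" and "dmb #11" yield the same operand (ISH encodes as 11). ISB
 // accepts only "sy" (or an immediate), and TSB only "csync", as checked above.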
3039 
3040 OperandMatchResultTy
3041 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3042  MCAsmParser &Parser = getParser();
3043  const AsmToken &Tok = Parser.getTok();
3044 
3045  if (Tok.isNot(AsmToken::Identifier))
3046  return MatchOperand_NoMatch;
3047 
3048  int MRSReg, MSRReg;
3049  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3050  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3051  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3052  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3053  } else
3054  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3055 
3056  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3057  unsigned PStateImm = -1;
3058  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3059  PStateImm = PState->Encoding;
3060 
3061  Operands.push_back(
3062  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3063  PStateImm, getContext()));
3064  Parser.Lex(); // Eat identifier
3065 
3066  return MatchOperand_Success;
3067 }
3068 
3069 /// tryParseNeonVectorRegister - Parse a vector register operand.
3070 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3071  MCAsmParser &Parser = getParser();
3072  if (Parser.getTok().isNot(AsmToken::Identifier))
3073  return true;
3074 
3075  SMLoc S = getLoc();
3076  // Check for a vector register specifier first.
3077  StringRef Kind;
3078  unsigned Reg;
3079  OperandMatchResultTy Res =
3080  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3081  if (Res != MatchOperand_Success)
3082  return true;
3083 
3084  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3085  if (!KindRes)
3086  return true;
3087 
3088  unsigned ElementWidth = KindRes->second;
3089  Operands.push_back(
3090  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3091  S, getLoc(), getContext()));
3092 
3093  // If there was an explicit qualifier, that goes on as a literal text
3094  // operand.
3095  if (!Kind.empty())
3096  Operands.push_back(
3097  AArch64Operand::CreateToken(Kind, false, S, getContext()));
3098 
3099  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3100 }
3101 
3102 OperandMatchResultTy
3103 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3104  SMLoc SIdx = getLoc();
3105  if (parseOptionalToken(AsmToken::LBrac)) {
3106  const MCExpr *ImmVal;
3107  if (getParser().parseExpression(ImmVal))
3108  return MatchOperand_NoMatch;
3109  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3110  if (!MCE) {
3111  TokError("immediate value expected for vector index");
3112  return MatchOperand_ParseFail;
3113  }
3114 
3115  SMLoc E = getLoc();
3116 
3117  if (parseToken(AsmToken::RBrac, "']' expected"))
3118  return MatchOperand_ParseFail;
3119 
3120  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3121  E, getContext()));
3122  return MatchOperand_Success;
3123  }
3124 
3125  return MatchOperand_NoMatch;
3126 }
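 // For example, in "mov s0, v1.s[2]" the trailing "[2]" is parsed here into a
 // vector-index operand of value 2; a non-constant index such as "v1.s[x0]"
 // is rejected with the diagnostic above.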
3127 
3128 // tryParseVectorRegister - Try to parse a vector register name with
3129 // optional kind specifier. If it is a register specifier, eat the token
3130 // and return it.
3131 OperandMatchResultTy
3132 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3133  RegKind MatchKind) {
3134  MCAsmParser &Parser = getParser();
3135  const AsmToken &Tok = Parser.getTok();
3136 
3137  if (Tok.isNot(AsmToken::Identifier))
3138  return MatchOperand_NoMatch;
3139 
3140  StringRef Name = Tok.getString();
3141  // If there is a kind specifier, it's separated from the register name by
3142  // a '.'.
3143  size_t Start = 0, Next = Name.find('.');
3144  StringRef Head = Name.slice(Start, Next);
3145  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3146 
3147  if (RegNum) {
3148  if (Next != StringRef::npos) {
3149  Kind = Name.slice(Next, StringRef::npos);
3150  if (!isValidVectorKind(Kind, MatchKind)) {
3151  TokError("invalid vector kind qualifier");
3152  return MatchOperand_ParseFail;
3153  }
3154  }
3155  Parser.Lex(); // Eat the register token.
3156 
3157  Reg = RegNum;
3158  return MatchOperand_Success;
3159  }
3160 
3161  return MatchOperand_NoMatch;
3162 }
3163 
3164 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3165 OperandMatchResultTy
3166 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3167  // Check for a SVE predicate register specifier first.
3168  const SMLoc S = getLoc();
3169  StringRef Kind;
3170  unsigned RegNum;
3171  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3172  if (Res != MatchOperand_Success)
3173  return Res;
3174 
3175  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3176  if (!KindRes)
3177  return MatchOperand_NoMatch;
3178 
3179  unsigned ElementWidth = KindRes->second;
3180  Operands.push_back(AArch64Operand::CreateVectorReg(
3181  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3182  getLoc(), getContext()));
3183 
3184  // Not all predicates are followed by a '/m' or '/z'.
3185  MCAsmParser &Parser = getParser();
3186  if (Parser.getTok().isNot(AsmToken::Slash))
3187  return MatchOperand_Success;
3188 
3189  // But when they do they shouldn't have an element type suffix.
3190  if (!Kind.empty()) {
3191  Error(S, "not expecting size suffix");
3192  return MatchOperand_ParseFail;
3193  }
3194 
3195  // Add a literal slash as operand
3196  Operands.push_back(
3197  AArch64Operand::CreateToken("/", false, getLoc(), getContext()));
3198 
3199  Parser.Lex(); // Eat the slash.
3200 
3201  // Zeroing or merging?
3202  auto Pred = Parser.getTok().getString().lower();
3203  if (Pred != "z" && Pred != "m") {
3204  Error(getLoc(), "expecting 'm' or 'z' predication");
3205  return MatchOperand_ParseFail;
3206  }
3207 
3208  // Add zero/merge token.
3209  const char *ZM = Pred == "z" ? "z" : "m";
3210  Operands.push_back(
3211  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3212 
3213  Parser.Lex(); // Eat zero/merge token.
3214  return MatchOperand_Success;
3215 }
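 // For example, "p0/z" becomes three operands: the predicate register p0, a
 // literal "/" token, and a "z" (zeroing) token; "p0.b/z" is rejected because
 // a predication qualifier must not follow an element-size suffix.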
3216 
3217 /// parseRegister - Parse a register operand.
3218 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3219  // Try for a Neon vector register.
3220  if (!tryParseNeonVectorRegister(Operands))
3221  return false;
3222 
3223  // Otherwise try for a scalar register.
3224  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3225  return false;
3226 
3227  return true;
3228 }
3229 
3230 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3231  MCAsmParser &Parser = getParser();
3232  bool HasELFModifier = false;
3233  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
3234 
3235  if (parseOptionalToken(AsmToken::Colon)) {
3236  HasELFModifier = true;
3237 
3238  if (Parser.getTok().isNot(AsmToken::Identifier))
3239  return TokError("expect relocation specifier in operand after ':'");
3240 
3241  std::string LowerCase = Parser.getTok().getIdentifier().lower();
3242  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3243  .Case("lo12", AArch64MCExpr::VK_LO12)
3244  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3245  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3246  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3247  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3248  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3249  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3250  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3251  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3252  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3253  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3254  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3255  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3256  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3257  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3258  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3259  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3260  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3261  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3262  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3263  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3264  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3265  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3266  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3267  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3268  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3269  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3270  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3271  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3272  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3273  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3274  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3275  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3276  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3277  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3278  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3279  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3280  .Default(AArch64MCExpr::VK_INVALID);
3281 
3282  if (RefKind == AArch64MCExpr::VK_INVALID)
3283  return TokError("expect relocation specifier in operand after ':'");
3284 
3285  Parser.Lex(); // Eat identifier
3286 
3287  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3288  return true;
3289  }
3290 
3291  if (getParser().parseExpression(ImmVal))
3292  return true;
3293 
3294  if (HasELFModifier)
3295  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3296 
3297  return false;
3298 }
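 // The modifier syntax is ":specifier:expression"; for example (after any
 // '#' has been consumed by the caller):
 //   add  x0, x0, :lo12:sym   -> VK_LO12 wrapper around "sym"
 //   movz x1, #:abs_g1:sym    -> VK_ABS_G1 wrapper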
3299 
3300 template <RegKind VectorKind>
3301 OperandMatchResultTy
3302 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3303  bool ExpectMatch) {
3304  MCAsmParser &Parser = getParser();
3305  if (!Parser.getTok().is(AsmToken::LCurly))
3306  return MatchOperand_NoMatch;
3307 
3308  // Wrapper around parse function
3309  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3310  bool NoMatchIsError) {
3311  auto RegTok = Parser.getTok();
3312  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3313  if (ParseRes == MatchOperand_Success) {
3314  if (parseVectorKind(Kind, VectorKind))
3315  return ParseRes;
3316  llvm_unreachable("Expected a valid vector kind");
3317  }
3318 
3319  if (RegTok.isNot(AsmToken::Identifier) ||
3320  ParseRes == MatchOperand_ParseFail ||
3321  (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3322  Error(Loc, "vector register expected");
3323  return MatchOperand_ParseFail;
3324  }
3325 
3326  return MatchOperand_NoMatch;
3327  };
3328 
3329  SMLoc S = getLoc();
3330  auto LCurly = Parser.getTok();
3331  Parser.Lex(); // Eat left bracket token.
3332 
3333  StringRef Kind;
3334  unsigned FirstReg;
3335  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3336 
3337  // Put back the original left bracket if there was no match, so that
3338  // different types of list-operands can be matched (e.g. SVE, Neon).
3339  if (ParseRes == MatchOperand_NoMatch)
3340  Parser.getLexer().UnLex(LCurly);
3341 
3342  if (ParseRes != MatchOperand_Success)
3343  return ParseRes;
3344 
3345  int64_t PrevReg = FirstReg;
3346  unsigned Count = 1;
3347 
3348  if (parseOptionalToken(AsmToken::Minus)) {
3349  SMLoc Loc = getLoc();
3350  StringRef NextKind;
3351 
3352  unsigned Reg;
3353  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3354  if (ParseRes != MatchOperand_Success)
3355  return ParseRes;
3356 
3357  // Any kind suffixes must match on all regs in the list.
3358  if (Kind != NextKind) {
3359  Error(Loc, "mismatched register size suffix");
3360  return MatchOperand_ParseFail;
3361  }
3362 
3363  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3364 
3365  if (Space == 0 || Space > 3) {
3366  Error(Loc, "invalid number of vectors");
3367  return MatchOperand_ParseFail;
3368  }
3369 
3370  Count += Space;
3371  }
3372  else {
3373  while (parseOptionalToken(AsmToken::Comma)) {
3374  SMLoc Loc = getLoc();
3375  StringRef NextKind;
3376  unsigned Reg;
3377  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3378  if (ParseRes != MatchOperand_Success)
3379  return ParseRes;
3380 
3381  // Any kind suffixes must match on all regs in the list.
3382  if (Kind != NextKind) {
3383  Error(Loc, "mismatched register size suffix");
3384  return MatchOperand_ParseFail;
3385  }
3386 
3387  // Registers must be sequential (with wraparound at 31).
3388  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3389  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3390  Error(Loc, "registers must be sequential");
3391  return MatchOperand_ParseFail;
3392  }
3393 
3394  PrevReg = Reg;
3395  ++Count;
3396  }
3397  }
3398 
3399  if (parseToken(AsmToken::RCurly, "'}' expected"))
3400  return MatchOperand_ParseFail;
3401 
3402  if (Count > 4) {
3403  Error(S, "invalid number of vectors");
3404  return MatchOperand_ParseFail;
3405  }
3406 
3407  unsigned NumElements = 0;
3408  unsigned ElementWidth = 0;
3409  if (!Kind.empty()) {
3410  if (const auto &VK = parseVectorKind(Kind, VectorKind))
3411  std::tie(NumElements, ElementWidth) = *VK;
3412  }
3413 
3414  Operands.push_back(AArch64Operand::CreateVectorList(
3415  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3416  getContext()));
3417 
3418  return MatchOperand_Success;
3419 }
3420 
3421 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
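/// e.g. "{ v0.8b, v1.8b }" or the ranged form "{ v0.2d - v3.2d }", optionally
/// followed by a lane index such as "[1]".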
3422 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3423  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3424  if (ParseRes != MatchOperand_Success)
3425  return true;
3426 
3427  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3428 }
3429 
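/// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
/// ", #0" (e.g. "sp, #0"); any non-zero index is rejected below.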
3430 OperandMatchResultTy
3431 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3432  SMLoc StartLoc = getLoc();
3433 
3434  unsigned RegNum;
3435  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3436  if (Res != MatchOperand_Success)
3437  return Res;
3438 
3439  if (!parseOptionalToken(AsmToken::Comma)) {
3440  Operands.push_back(AArch64Operand::CreateReg(
3441  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3442  return MatchOperand_Success;
3443  }
3444 
3445  parseOptionalToken(AsmToken::Hash);
3446 
3447  if (getParser().getTok().isNot(AsmToken::Integer)) {
3448  Error(getLoc(), "index must be absent or #0");
3449  return MatchOperand_ParseFail;
3450  }
3451 
3452  const MCExpr *ImmVal;
3453  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3454  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3455  Error(getLoc(), "index must be absent or #0");
3456  return MatchOperand_ParseFail;
3457  }
3458 
3459  Operands.push_back(AArch64Operand::CreateReg(
3460  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3461  return MatchOperand_Success;
3462 }
3463 
3464 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3465 OperandMatchResultTy
3466 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3467  SMLoc StartLoc = getLoc();
3468 
3469  unsigned RegNum;
3470  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3471  if (Res != MatchOperand_Success)
3472  return Res;
3473 
3474  // No shift/extend is the default.
3475  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3476  Operands.push_back(AArch64Operand::CreateReg(
3477  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3478  return MatchOperand_Success;
3479  }
3480 
3481  // Eat the comma
3482  getParser().Lex();
3483 
3484  // Match the shift
3485  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3486  Res = tryParseOptionalShiftExtend(ExtOpnd);
3487  if (Res != MatchOperand_Success)
3488  return Res;
3489 
3490  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3491  Operands.push_back(AArch64Operand::CreateReg(
3492  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3493  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3494  Ext->hasShiftExtendAmount()));
3495 
3496  return MatchOperand_Success;
3497 }
3498 
3499 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3500  MCAsmParser &Parser = getParser();
3501 
3502  // Some SVE instructions have a decoration after the immediate, i.e.
3503  // "mul vl". We parse them here and add tokens, which must be present in the
3504  // asm string in the tablegen instruction.
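  // e.g. "ldr z0, [x0, #1, mul vl]" uses the "mul vl" form, while
  // "incb x0, all, mul #4" uses the "mul #<imm>" form.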
3505  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3506  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3507  if (!Parser.getTok().getString().equals_lower("mul") ||
3508  !(NextIsVL || NextIsHash))
3509  return true;
3510 
3511  Operands.push_back(
3512  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3513  Parser.Lex(); // Eat the "mul"
3514 
3515  if (NextIsVL) {
3516  Operands.push_back(
3517  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3518  Parser.Lex(); // Eat the "vl"
3519  return false;
3520  }
3521 
3522  if (NextIsHash) {
3523  Parser.Lex(); // Eat the #
3524  SMLoc S = getLoc();
3525 
3526  // Parse immediate operand.
3527  const MCExpr *ImmVal;
3528  if (!Parser.parseExpression(ImmVal))
3529  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3530  Operands.push_back(AArch64Operand::CreateImm(
3531  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3532  getContext()));
3533  return false; // Immediate operand parsed successfully.
3534  }
3535  }
3536 
3537  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3538 }
3539 
3540 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
3541 /// the operand regardless of the mnemonic.
3542 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3543  bool invertCondCode) {
3544  MCAsmParser &Parser = getParser();
3545 
3546  OperandMatchResultTy ResTy =
3547  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3548 
3549  // Check if the current operand has a custom associated parser; if so, try
3550  // to custom parse the operand, or fall back to the general approach.
3551  if (ResTy == MatchOperand_Success)
3552  return false;
3553  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3554  // there was a match, but an error occurred, in which case just return that
3555  // the operand parsing failed.
3556  if (ResTy == MatchOperand_ParseFail)
3557  return true;
3558 
3559  // Nothing custom, so do general case parsing.
3560  SMLoc S, E;
3561  switch (getLexer().getKind()) {
3562  default: {
3563  SMLoc S = getLoc();
3564  const MCExpr *Expr;
3565  if (parseSymbolicImmVal(Expr))
3566  return Error(S, "invalid operand");
3567 
3568  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3569  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3570  return false;
3571  }
3572  case AsmToken::LBrac: {
3573  SMLoc Loc = Parser.getTok().getLoc();
3574  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3575  getContext()));
3576  Parser.Lex(); // Eat '['
3577 
3578  // There's no comma after a '[', so we can parse the next operand
3579  // immediately.
3580  return parseOperand(Operands, false, false);
3581  }
3582  case AsmToken::LCurly:
3583  return parseNeonVectorList(Operands);
3584  case AsmToken::Identifier: {
3585  // If we're expecting a Condition Code operand, then just parse that.
3586  if (isCondCode)
3587  return parseCondCode(Operands, invertCondCode);
3588 
3589  // If it's a register name, parse it.
3590  if (!parseRegister(Operands))
3591  return false;
3592 
3593  // See if this is a "mul vl" decoration or "mul #<int>" operand used
3594  // by SVE instructions.
3595  if (!parseOptionalMulOperand(Operands))
3596  return false;
3597 
3598  // This could be an optional "shift" or "extend" operand.
3599  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3600  // We can only continue if no tokens were eaten.
3601  if (GotShift != MatchOperand_NoMatch)
3602  return GotShift;
3603 
3604  // This was not a register so parse other operands that start with an
3605  // identifier (like labels) as expressions and create them as immediates.
3606  const MCExpr *IdVal;
3607  S = getLoc();
3608  if (getParser().parseExpression(IdVal))
3609  return true;
3610  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3611  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3612  return false;
3613  }
3614  case AsmToken::Integer:
3615  case AsmToken::Real:
3616  case AsmToken::Hash: {
3617  // #42 -> immediate.
3618  S = getLoc();
3619 
3620  parseOptionalToken(AsmToken::Hash);
3621 
3622  // Parse a negative sign
3623  bool isNegative = false;
3624  if (Parser.getTok().is(AsmToken::Minus)) {
3625  isNegative = true;
3626  // We need to consume this token only when we have a Real, otherwise
3627  // we let parseSymbolicImmVal take care of it
3628  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3629  Parser.Lex();
3630  }
3631 
3632  // The only Real that should come through here is a literal #0.0 for
3633  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3634  // so convert the value.
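  // e.g. "fcmp s0, #0.0" is accepted, while any other literal such as
  // "fcmp s0, #1.0" is rejected with "expected floating-point constant #0.0".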
3635  const AsmToken &Tok = Parser.getTok();
3636  if (Tok.is(AsmToken::Real)) {
3637  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3638  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3639  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3640  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3641  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3642  return TokError("unexpected floating point literal");
3643  else if (IntVal != 0 || isNegative)
3644  return TokError("expected floating-point constant #0.0");
3645  Parser.Lex(); // Eat the token.
3646 
3647  Operands.push_back(
3648  AArch64Operand::CreateToken("#0", false, S, getContext()));
3649  Operands.push_back(
3650  AArch64Operand::CreateToken(".0", false, S, getContext()));
3651  return false;
3652  }
3653 
3654  const MCExpr *ImmVal;
3655  if (parseSymbolicImmVal(ImmVal))
3656  return true;
3657 
3658  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3659  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3660  return false;
3661  }
3662  case AsmToken::Equal: {
3663  SMLoc Loc = getLoc();
3664  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3665  return TokError("unexpected token in operand");
3666  Parser.Lex(); // Eat '='
3667  const MCExpr *SubExprVal;
3668  if (getParser().parseExpression(SubExprVal))
3669  return true;
3670 
3671  if (Operands.size() < 2 ||
3672  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3673  return Error(Loc, "Only valid when first operand is register");
3674 
3675  bool IsXReg =
3676  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3677  Operands[1]->getReg());
3678 
3679  MCContext& Ctx = getContext();
3680  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3681  // If the op is an immediate that fits into a mov, replace ldr with mov.
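  // e.g. "ldr x0, =0x10000" is rewritten below to "movz x0, #1, lsl #16".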
3682  if (isa<MCConstantExpr>(SubExprVal)) {
3683  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3684  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3685  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3686  ShiftAmt += 16;
3687  Imm >>= 16;
3688  }
3689  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3690  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3691  Operands.push_back(AArch64Operand::CreateImm(
3692  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3693  if (ShiftAmt)
3694  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3695  ShiftAmt, true, S, E, Ctx));
3696  return false;
3697  }
3698  APInt Simm = APInt(64, Imm << ShiftAmt);
3699  // Check that the immediate is an unsigned or signed 32-bit int for W regs.
3700  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3701  return Error(Loc, "Immediate too large for register");
3702  }
3703  // If it is a label or an immediate that cannot fit in a movz, put it in the constant pool.
3704  const MCExpr *CPLoc =
3705  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3706  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3707  return false;
3708  }
3709  }
3710 }
3711 
3712 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3713  const MCParsedAsmOperand &Op2) const {
3714  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3715  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3716  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3717  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3718  return MCTargetAsmParser::regsEqual(Op1, Op2);
3719 
3720  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3721  "Testing equality of non-scalar registers not supported");
3722 
3723  // Check whether the registers match via their sub/super-register relationship.
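  // e.g. under EqualsSuperReg, operands w1 and x1 compare equal because x1
  // is the 64-bit super-register of w1.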
3724  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3725  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3726  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3727  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3728  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3729  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3730  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3731  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3732 
3733  return false;
3734 }
3735 
3736 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3737 /// operands.
3738 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3739  StringRef Name, SMLoc NameLoc,
3740  OperandVector &Operands) {
3741  MCAsmParser &Parser = getParser();
3742  Name = StringSwitch<StringRef>(Name.lower())
3743  .Case("beq", "b.eq")
3744  .Case("bne", "b.ne")
3745  .Case("bhs", "b.hs")
3746  .Case("bcs", "b.cs")
3747  .Case("blo", "b.lo")
3748  .Case("bcc", "b.cc")
3749  .Case("bmi", "b.mi")
3750  .Case("bpl", "b.pl")
3751  .Case("bvs", "b.vs")
3752  .Case("bvc", "b.vc")
3753  .Case("bhi", "b.hi")
3754  .Case("bls", "b.ls")
3755  .Case("bge", "b.ge")
3756  .Case("blt", "b.lt")
3757  .Case("bgt", "b.gt")
3758  .Case("ble", "b.le")
3759  .Case("bal", "b.al")
3760  .Case("bnv", "b.nv")
3761  .Default(Name);
3762 
3763  // First check for the AArch64-specific .req directive.
3764  if (Parser.getTok().is(AsmToken::Identifier) &&
3765  Parser.getTok().getIdentifier() == ".req") {
3766  parseDirectiveReq(Name, NameLoc);
3767  // We always return 'error' for this, as we're done with this
3768  // statement and don't need to match the instruction.
3769  return true;
3770  }
3771 
3772  // Create the leading tokens for the mnemonic, split by '.' characters.
3773  size_t Start = 0, Next = Name.find('.');
3774  StringRef Head = Name.slice(Start, Next);
3775 
3776  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3777  // the SYS instruction.
3778  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
3779  Head == "cfp" || Head == "dvp" || Head == "cpp")
3780  return parseSysAlias(Head, NameLoc, Operands);
3781 
3782  Operands.push_back(
3783  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3784  Mnemonic = Head;
3785 
3786  // Handle condition codes for a branch mnemonic
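  // e.g. "beq" was canonicalized to "b.eq" above; it is now split into the
  // token "b", a "." token and a condition-code operand (EQ).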
3787  if (Head == "b" && Next != StringRef::npos) {
3788  Start = Next;
3789  Next = Name.find('.', Start + 1);
3790  Head = Name.slice(Start + 1, Next);
3791 
3792  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3793  (Head.data() - Name.data()));
3794  AArch64CC::CondCode CC = parseCondCodeString(Head);
3795  if (CC == AArch64CC::Invalid)
3796  return Error(SuffixLoc, "invalid condition code");
3797  Operands.push_back(
3798  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3799  Operands.push_back(
3800  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3801  }
3802 
3803  // Add the remaining tokens in the mnemonic.
3804  while (Next != StringRef::npos) {
3805  Start = Next;
3806  Next = Name.find('.', Start + 1);
3807  Head = Name.slice(Start, Next);
3808  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3809  (Head.data() - Name.data()) + 1);
3810  Operands.push_back(
3811  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3812  }
3813 
3814  // Conditional compare instructions have a Condition Code operand, which
3815  // needs to be parsed and turned into an immediate operand.
3816  bool condCodeFourthOperand =
3817  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3818  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3819  Head == "csinc" || Head == "csinv" || Head == "csneg");
3820 
3821  // These instructions are aliases to some of the conditional select
3822  // instructions. However, the condition code is inverted in the aliased
3823  // instruction.
3824  //
3825  // FIXME: Is this the correct way to handle these? Or should the parser
3826  // generate the aliased instructions directly?
3827  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3828  bool condCodeThirdOperand =
3829  (Head == "cinc" || Head == "cinv" || Head == "cneg");
3830 
3831  // Read the remaining operands.
3832  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3833 
3834  unsigned N = 1;
3835  do {
3836  // Parse and remember the operand.
3837  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3838  (N == 3 && condCodeThirdOperand) ||
3839  (N == 2 && condCodeSecondOperand),
3840  condCodeSecondOperand || condCodeThirdOperand)) {
3841  return true;
3842  }
3843 
3844  // After successfully parsing some operands there are two special cases to
3845  // consider (i.e. notional operands not separated by commas). Both are due
3846  // to memory specifiers:
3847  // + An RBrac will end an address for load/store/prefetch
3848  // + An '!' will indicate a pre-indexed operation.
3849  //
3850  // It's someone else's responsibility to make sure these tokens are sane
3851  // in the given context!
3852 
3853  SMLoc RLoc = Parser.getTok().getLoc();
3854  if (parseOptionalToken(AsmToken::RBrac))
3855  Operands.push_back(
3856  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3857  SMLoc ELoc = Parser.getTok().getLoc();
3858  if (parseOptionalToken(AsmToken::Exclaim))
3859  Operands.push_back(
3860  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3861 
3862  ++N;
3863  } while (parseOptionalToken(AsmToken::Comma));
3864  }
3865 
3866  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3867  return true;
3868 
3869  return false;
3870 }
3871 
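// isMatchingOrAlias - Return true if Reg is the B/H/S/D/Q (or Z) register
// with the same index as ZReg, e.g. q3 and d3 both alias z3.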
3872 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3873  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3874  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3875  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3876  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3877  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3878  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3879  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3880 }
3881 
3882 // FIXME: This entire function is a giant hack to provide us with decent
3883 // operand range validation/diagnostics until TableGen/MC can be extended
3884 // to support autogeneration of this kind of validation.
3885 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3886  SmallVectorImpl<SMLoc> &Loc) {
3887  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3888  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3889 
3890  // A prefix only applies to the instruction following it. Here we extract
3891  // prefix information for the next instruction before validating the current
3892  // one so that in the case of failure we don't erroneously continue using the
3893  // current prefix.
3894  PrefixInfo Prefix = NextPrefix;
3895  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3896 
3897  // Before validating the instruction in isolation we run through the rules
3898  // applicable when it follows a prefix instruction.
3899  // NOTE: brk & hlt can be prefixed but require no additional validation.
3900  if (Prefix.isActive() &&
3901  (Inst.getOpcode() != AArch64::BRK) &&
3902  (Inst.getOpcode() != AArch64::HLT)) {
3903 
3904  // Prefixed instructions must have a destructive operand.
3905  if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3906  AArch64::NotDestructive)
3907  return Error(IDLoc, "instruction is unpredictable when following a"
3908  " movprfx, suggest replacing movprfx with mov");
3909 
3910  // Destination operands must match.
3911  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3912  return Error(Loc[0], "instruction is unpredictable when following a"
3913  " movprfx writing to a different destination");
3914 
3915  // Destination operand must not be used in any other location.
3916  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3917  if (Inst.getOperand(i).isReg() &&
3918  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3919  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3920  return Error(Loc[0], "instruction is unpredictable when following a"
3921  " movprfx and destination also used as non-destructive"
3922  " source");
3923  }
3924 
3925  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3926  if (Prefix.isPredicated()) {
3927  int PgIdx = -1;
3928 
3929  // Find the instruction's general predicate.
3930  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3931  if (Inst.getOperand(i).isReg() &&
3932  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3933  PgIdx = i;
3934  break;
3935  }
3936 
3937  // Instruction must be predicated if the movprfx is predicated.
3938  if (PgIdx == -1 ||
3939  (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3940  return Error(IDLoc, "instruction is unpredictable when following a"
3941  " predicated movprfx, suggest using unpredicated movprfx");
3942 
3943  // Instruction must use same general predicate as the movprfx.
3944  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3945  return Error(IDLoc, "instruction is unpredictable when following a"
3946  " predicated movprfx using a different general predicate");
3947 
3948  // Instruction element type must match the movprfx.
3949  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3950  return Error(IDLoc, "instruction is unpredictable when following a"
3951  " predicated movprfx with a different element size");
3952  }
3953  }
3954 
3955  // Check for indexed addressing modes where the base register is the
3956  // same as a destination/source register, or for pair loads where
3957  // Rt == Rt2. All of those are unpredictable.
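  // e.g. "ldp x0, x1, [x0], #16" (writeback base is also a destination) and
  // "ldp x0, x0, [x1]" (Rt2 == Rt) are both diagnosed below.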
3958  switch (Inst.getOpcode()) {
3959  case AArch64::LDPSWpre:
3960  case AArch64::LDPWpost:
3961  case AArch64::LDPWpre:
3962  case AArch64::LDPXpost:
3963  case AArch64::LDPXpre: {
3964  unsigned Rt = Inst.getOperand(1).getReg();
3965  unsigned Rt2 = Inst.getOperand(2).getReg();
3966  unsigned Rn = Inst.getOperand(3).getReg();
3967  if (RI->isSubRegisterEq(Rn, Rt))
3968  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3969  "is also a destination");
3970  if (RI->isSubRegisterEq(Rn, Rt2))
3971  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3972  "is also a destination");
3974  }
3975  case AArch64::LDPDi:
3976  case AArch64::LDPQi:
3977  case AArch64::LDPSi:
3978  case AArch64::LDPSWi:
3979  case AArch64::LDPWi:
3980  case AArch64::LDPXi: {
3981  unsigned Rt = Inst.getOperand(0).getReg();
3982  unsigned Rt2 = Inst.getOperand(1).getReg();
3983  if (Rt == Rt2)
3984  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3985  break;
3986  }
3987  case AArch64::LDPDpost:
3988  case AArch64::LDPDpre:
3989  case AArch64::LDPQpost:
3990  case AArch64::LDPQpre:
3991  case AArch64::LDPSpost:
3992  case AArch64::LDPSpre:
3993  case AArch64::LDPSWpost: {
3994  unsigned Rt = Inst.getOperand(1).getReg();
3995  unsigned Rt2 = Inst.getOperand(2).getReg();
3996  if (Rt == Rt2)
3997  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3998  break;
3999  }
4000  case AArch64::STPDpost:
4001  case AArch64::STPDpre:
4002  case AArch64::STPQpost:
4003  case AArch64::STPQpre:
4004  case AArch64::STPSpost:
4005  case AArch64::STPSpre:
4006  case AArch64::STPWpost:
4007  case AArch64::STPWpre:
4008  case AArch64::STPXpost:
4009  case AArch64::STPXpre: {
4010  unsigned Rt = Inst.getOperand(1).getReg();
4011  unsigned Rt2 = Inst.getOperand(2).getReg();
4012  unsigned Rn = Inst.getOperand(3).getReg();
4013  if (RI->isSubRegisterEq(Rn, Rt))
4014  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4015  "is also a source");
4016  if (RI->isSubRegisterEq(Rn, Rt2))
4017  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4018  "is also a source");
4019  break;
4020  }
4021  case AArch64::LDRBBpre:
4022  case AArch64::LDRBpre:
4023  case AArch64::LDRHHpre:
4024  case AArch64::LDRHpre:
4025  case AArch64::LDRSBWpre:
4026  case AArch64::LDRSBXpre:
4027  case AArch64::LDRSHWpre:
4028  case AArch64::LDRSHXpre:
4029  case AArch64::LDRSWpre:
4030  case AArch64::LDRWpre:
4031  case AArch64::LDRXpre:
4032  case AArch64::LDRBBpost:
4033  case AArch64::LDRBpost:
4034  case AArch64::LDRHHpost:
4035  case AArch64::LDRHpost:
4036  case AArch64::LDRSBWpost:
4037  case AArch64::LDRSBXpost:
4038  case AArch64::LDRSHWpost:
4039  case AArch64::LDRSHXpost:
4040  case AArch64::LDRSWpost:
4041  case AArch64::LDRWpost:
4042  case AArch64::LDRXpost: {
4043  unsigned Rt = Inst.getOperand(1).getReg();
4044  unsigned Rn = Inst.getOperand(2).getReg();
4045  if (RI->isSubRegisterEq(Rn, Rt))
4046  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4047  "is also a source");
4048  break;
4049  }
4050  case AArch64::STRBBpost:
4051  case AArch64::STRBpost:
4052  case AArch64::STRHHpost:
4053  case AArch64::STRHpost:
4054  case AArch64::STRWpost:
4055  case AArch64::STRXpost:
4056  case AArch64::STRBBpre:
4057  case AArch64::STRBpre:
4058  case AArch64::STRHHpre:
4059  case AArch64::STRHpre:
4060  case AArch64::STRWpre:
4061  case AArch64::STRXpre: {
4062  unsigned Rt = Inst.getOperand(1).getReg();
4063  unsigned Rn = Inst.getOperand(2).getReg();
4064  if (RI->isSubRegisterEq(Rn, Rt))
4065  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4066  "is also a source");
4067  break;
4068  }
4069  case AArch64::STXRB:
4070  case AArch64::STXRH:
4071  case AArch64::STXRW:
4072  case AArch64::STXRX:
4073  case AArch64::STLXRB:
4074  case AArch64::STLXRH:
4075  case AArch64::STLXRW:
4076  case AArch64::STLXRX: {
4077  unsigned Rs = Inst.getOperand(0).getReg();
4078  unsigned Rt = Inst.getOperand(1).getReg();
4079  unsigned Rn = Inst.getOperand(2).getReg();
4080  if (RI->isSubRegisterEq(Rt, Rs) ||
4081  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4082  return Error(Loc[0],
4083  "unpredictable STXR instruction, status is also a source");
4084  break;
4085  }
4086  case AArch64::STXPW:
4087  case AArch64::STXPX:
4088  case AArch64::STLXPW:
4089  case AArch64::STLXPX: {
4090  unsigned Rs = Inst.getOperand(0).getReg();
4091  unsigned Rt1 = Inst.getOperand(1).getReg();
4092  unsigned Rt2 = Inst.getOperand(2).getReg();
4093  unsigned Rn = Inst.getOperand(3).getReg();
4094  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4095  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4096  return Error(Loc[0],
4097  "unpredictable STXP instruction, status is also a source");
4098  break;
4099  }
4100  case AArch64::LDGV: {
4101  unsigned Rt = Inst.getOperand(0).getReg();
4102  unsigned Rn = Inst.getOperand(1).getReg();
4103  if (RI->isSubRegisterEq(Rt, Rn)) {
4104  return Error(Loc[0],
4105  "unpredictable LDGV instruction, writeback register is also "
4106  "the target register");
4107  }
4108  }
4109  }
4110 
4111 
4112  // Now check immediate ranges. Separate from the above as there is overlap
4113  // in the instructions being checked and this keeps the nested conditionals
4114  // to a minimum.
4115  switch (Inst.getOpcode()) {
4116  case AArch64::ADDSWri:
4117  case AArch64::ADDSXri:
4118  case AArch64::ADDWri:
4119  case AArch64::ADDXri:
4120  case AArch64::SUBSWri:
4121  case AArch64::SUBSXri:
4122  case AArch64::SUBWri:
4123  case AArch64::SUBXri: {
4124  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4125  // some slight duplication here.
4126  if (Inst.getOperand(2).isExpr()) {
4127  const MCExpr *Expr = Inst.getOperand(2).getExpr();
4128  AArch64MCExpr::VariantKind ELFRefKind;
4129  MCSymbolRefExpr::VariantKind DarwinRefKind;
4130  int64_t Addend;
4131  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4132 
4133  // Only allow these with ADDXri.
4134  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4135  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4136  Inst.getOpcode() == AArch64::ADDXri)
4137  return false;
4138 
4139  // Only allow these with ADDXri/ADDWri
4140  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4141  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4142  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4143  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4144  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4145  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4146  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4147  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4148  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4149  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4150  (Inst.getOpcode() == AArch64::ADDXri ||
4151  Inst.getOpcode() == AArch64::ADDWri))
4152  return false;
4153 
4154  // Otherwise, don't allow symbol refs in the immediate field.
4155  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4156  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4157  // 'cmp w0, borked').
4158  return Error(Loc.back(), "invalid immediate expression");
4159  }
4160  // We don't validate more complex expressions here
4161  }
4162  return false;
4163  }
4164  default:
4165  return false;
4166  }
4167 }
4168 
4169 static std::string AArch64MnemonicSpellCheck(StringRef S,
4170  const FeatureBitset &FBS,
4171  unsigned VariantID = 0);
4172 
4173 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4174  uint64_t ErrorInfo,
4175  OperandVector &Operands) {
4176  switch (ErrCode) {
4177  case Match_InvalidTiedOperand: {
4178  RegConstraintEqualityTy EqTy =
4179  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4180  .getRegEqualityTy();
4181  switch (EqTy) {
4182  case RegConstraintEqualityTy::EqualsSubReg:
4183  return Error(Loc, "operand must be 64-bit form of destination register");
4184  case RegConstraintEqualityTy::EqualsSuperReg:
4185  return Error(Loc, "operand must be 32-bit form of destination register");
4186  case RegConstraintEqualityTy::EqualsReg:
4187  return Error(Loc, "operand must match destination register");
4188  }
4189  llvm_unreachable("Unknown RegConstraintEqualityTy");
4190  }
4191  case Match_MissingFeature:
4192  return Error(Loc,
4193  "instruction requires a CPU feature not currently enabled");
4194  case Match_InvalidOperand:
4195  return Error(Loc, "invalid operand for instruction");
4196  case Match_InvalidSuffix:
4197  return Error(Loc, "invalid type suffix for instruction");
4198  case Match_InvalidCondCode:
4199  return Error(Loc, "expected AArch64 condition code");
4200  case Match_AddSubRegExtendSmall:
4201  return Error(Loc,
4202  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4203  case Match_AddSubRegExtendLarge:
4204  return Error(Loc,
4205  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4206  case Match_AddSubSecondSource:
4207  return Error(Loc,
4208  "expected compatible register, symbol or integer in range [0, 4095]");
4209  case Match_LogicalSecondSource:
4210  return Error(Loc, "expected compatible register or logical immediate");
4211  case Match_InvalidMovImm32Shift:
4212  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4213  case Match_InvalidMovImm64Shift:
4214  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4215  case Match_AddSubRegShift32:
4216  return Error(Loc,
4217  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4218  case Match_AddSubRegShift64:
4219  return Error(Loc,
4220  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4221  case Match_InvalidFPImm:
4222  return Error(Loc,
4223  "expected compatible register or floating-point constant");
4224  case Match_InvalidMemoryIndexedSImm6:
4225  return Error(Loc, "index must be an integer in range [-32, 31].");
4226  case Match_InvalidMemoryIndexedSImm5:
4227  return Error(Loc, "index must be an integer in range [-16, 15].");
4228  case Match_InvalidMemoryIndexed1SImm4:
4229  return Error(Loc, "index must be an integer in range [-8, 7].");
4230  case Match_InvalidMemoryIndexed2SImm4:
4231  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4232  case Match_InvalidMemoryIndexed3SImm4:
4233  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4234  case Match_InvalidMemoryIndexed4SImm4:
4235  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4236  case Match_InvalidMemoryIndexed16SImm4:
4237  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4238  case Match_InvalidMemoryIndexed1SImm6:
4239  return Error(Loc, "index must be an integer in range [-32, 31].");
4240  case Match_InvalidMemoryIndexedSImm8:
4241  return Error(Loc, "index must be an integer in range [-128, 127].");
4242  case Match_InvalidMemoryIndexedSImm9:
4243  return Error(Loc, "index must be an integer in range [-256, 255].");
4244  case Match_InvalidMemoryIndexed16SImm9:
4245  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4246  case Match_InvalidMemoryIndexed8SImm10:
4247  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4248  case Match_InvalidMemoryIndexed4SImm7:
4249  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4250  case Match_InvalidMemoryIndexed8SImm7:
4251  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4252  case Match_InvalidMemoryIndexed16SImm7:
4253  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4254  case Match_InvalidMemoryIndexed8UImm5:
4255  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4256  case Match_InvalidMemoryIndexed4UImm5:
4257  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4258  case Match_InvalidMemoryIndexed2UImm5:
4259  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4260  case Match_InvalidMemoryIndexed8UImm6:
4261  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4262  case Match_InvalidMemoryIndexed16UImm6:
4263  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4264  case Match_InvalidMemoryIndexed4UImm6:
4265  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4266  case Match_InvalidMemoryIndexed2UImm6:
4267  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4268  case Match_InvalidMemoryIndexed1UImm6:
4269  return Error(Loc, "index must be in range [0, 63].");
4270  case Match_InvalidMemoryWExtend8:
4271  return Error(Loc,
4272  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4273  case Match_InvalidMemoryWExtend16:
4274  return Error(Loc,
4275  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4276  case Match_InvalidMemoryWExtend32:
4277  return Error(Loc,
4278  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4279  case Match_InvalidMemoryWExtend64:
4280  return Error(Loc,
4281  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4282  case Match_InvalidMemoryWExtend128:
4283  return Error(Loc,
4284  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4285  case Match_InvalidMemoryXExtend8:
4286  return Error(Loc,
4287  "expected 'lsl' or 'sxtx' with optional shift of #0");
4288  case Match_InvalidMemoryXExtend16:
4289  return Error(Loc,
4290  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4291  case Match_InvalidMemoryXExtend32:
4292  return Error(Loc,
4293  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4294  case Match_InvalidMemoryXExtend64:
4295  return Error(Loc,
4296  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4297  case Match_InvalidMemoryXExtend128:
4298  return Error(Loc,
4299  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4300  case Match_InvalidMemoryIndexed1:
4301  return Error(Loc, "index must be an integer in range [0, 4095].");
4302  case Match_InvalidMemoryIndexed2:
4303  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4304  case Match_InvalidMemoryIndexed4:
4305  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4306  case Match_InvalidMemoryIndexed8:
4307  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4308  case Match_InvalidMemoryIndexed16:
4309  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4310  case Match_InvalidImm0_1:
4311  return Error(Loc, "immediate must be an integer in range [0, 1].");
4312  case Match_InvalidImm0_7:
4313  return Error(Loc, "immediate must be an integer in range [0, 7].");
4314  case Match_InvalidImm0_15:
4315  return Error(Loc, "immediate must be an integer in range [0, 15].");
4316  case Match_InvalidImm0_31:
4317  return Error(Loc, "immediate must be an integer in range [0, 31].");
4318  case Match_InvalidImm0_63:
4319  return Error(Loc, "immediate must be an integer in range [0, 63].");
4320  case Match_InvalidImm0_127:
4321  return Error(Loc, "immediate must be an integer in range [0, 127].");
4322  case Match_InvalidImm0_255:
4323  return Error(Loc, "immediate must be an integer in range [0, 255].");
4324  case Match_InvalidImm0_65535:
4325  return Error(Loc, "immediate must be an integer in range [0, 65535].");
4326  case Match_InvalidImm1_8:
4327  return Error(Loc, "immediate must be an integer in range [1, 8].");
4328  case Match_InvalidImm1_16:
4329  return Error(Loc, "immediate must be an integer in range [1, 16].");
4330  case Match_InvalidImm1_32:
4331  return Error(Loc, "immediate must be an integer in range [1, 32].");
4332  case Match_InvalidImm1_64:
4333  return Error(Loc, "immediate must be an integer in range [1, 64].");
4334  case Match_InvalidSVEAddSubImm8:
4335  return Error(Loc, "immediate must be an integer in range [0, 255]"
4336  " with a shift amount of 0");
4337  case Match_InvalidSVEAddSubImm16:
4338  case Match_InvalidSVEAddSubImm32:
4339  case Match_InvalidSVEAddSubImm64:
4340  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4341  "multiple of 256 in range [256, 65280]");
4342  case Match_InvalidSVECpyImm8:
4343  return Error(Loc, "immediate must be an integer in range [-128, 255]"
4344  " with a shift amount of 0");
4345  case Match_InvalidSVECpyImm16:
4346  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4347  "multiple of 256 in range [-32768, 65280]");
4348  case Match_InvalidSVECpyImm32:
4349  case Match_InvalidSVECpyImm64:
4350  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4351  "multiple of 256 in range [-32768, 32512]");
4352  case Match_InvalidIndexRange1_1:
4353  return Error(Loc, "expected lane specifier '[1]'");
4354  case Match_InvalidIndexRange0_15:
4355  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4356  case Match_InvalidIndexRange0_7:
4357  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4358  case Match_InvalidIndexRange0_3:
4359  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4360  case Match_InvalidIndexRange0_1:
4361  return Error(Loc, "vector lane must be an integer in range [0, 1].");
4362  case Match_InvalidSVEIndexRange0_63:
4363  return Error(Loc, "vector lane must be an integer in range [0, 63].");
4364  case Match_InvalidSVEIndexRange0_31:
4365  return Error(Loc, "vector lane must be an integer in range [0, 31].");
4366  case Match_InvalidSVEIndexRange0_15:
4367  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4368  case Match_InvalidSVEIndexRange0_7:
4369  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4370  case Match_InvalidSVEIndexRange0_3:
4371  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4372  case Match_InvalidLabel:
4373  return Error(Loc, "expected label or encodable integer pc offset");
4374  case Match_MRS:
4375  return Error(Loc, "expected readable system register");
4376  case Match_MSR:
4377  return Error(Loc, "expected writable system register or pstate");
4378  case Match_InvalidComplexRotationEven:
4379  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4380  case Match_InvalidComplexRotationOdd:
4381  return Error(Loc, "complex rotation must be 90 or 270.");
4382  case Match_MnemonicFail: {
4383  std::string Suggestion = AArch64MnemonicSpellCheck(
4384  ((AArch64Operand &)*Operands[0]).getToken(),
4385  ComputeAvailableFeatures(STI->getFeatureBits()));
4386  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4387  }
4388  case Match_InvalidGPR64shifted8:
4389  return Error(Loc, "register must be x0..x30 or xzr, without shift");
4390  case Match_InvalidGPR64shifted16:
4391  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4392  case Match_InvalidGPR64shifted32:
4393  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4394  case Match_InvalidGPR64shifted64:
4395  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4396  case Match_InvalidGPR64NoXZRshifted8:
4397  return Error(Loc, "register must be x0..x30 without shift");
4398  case Match_InvalidGPR64NoXZRshifted16:
4399  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4400  case Match_InvalidGPR64NoXZRshifted32:
4401  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4402  case Match_InvalidGPR64NoXZRshifted64:
4403  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4404  case Match_InvalidZPR32UXTW8:
4405  case Match_InvalidZPR32SXTW8:
4406  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4407  case Match_InvalidZPR32UXTW16:
4408  case Match_InvalidZPR32SXTW16:
4409  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4410  case Match_InvalidZPR32UXTW32:
4411  case Match_InvalidZPR32SXTW32:
4412  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4413  case Match_InvalidZPR32UXTW64:
4414  case Match_InvalidZPR32SXTW64:
4415  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4416  case Match_InvalidZPR64UXTW8:
4417  case Match_InvalidZPR64SXTW8:
4418  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4419  case Match_InvalidZPR64UXTW16:
4420  case Match_InvalidZPR64SXTW16:
4421  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4422  case Match_InvalidZPR64UXTW32:
4423  case Match_InvalidZPR64SXTW32:
4424  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4425  case Match_InvalidZPR64UXTW64:
4426  case Match_InvalidZPR64SXTW64:
4427  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4428  case Match_InvalidZPR32LSL8:
4429  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4430  case Match_InvalidZPR32LSL16:
4431  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4432  case Match_InvalidZPR32LSL32:
4433  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4434  case Match_InvalidZPR32LSL64:
4435  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4436  case Match_InvalidZPR64LSL8:
4437  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4438  case Match_InvalidZPR64LSL16:
4439  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4440  case Match_InvalidZPR64LSL32:
4441  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4442  case Match_InvalidZPR64LSL64:
4443  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4444  case Match_InvalidZPR0:
4445  return Error(Loc, "expected register without element width sufix");
4446  case Match_InvalidZPR8:
4447  case Match_InvalidZPR16:
4448  case Match_InvalidZPR32:
4449  case Match_InvalidZPR64:
4450  case Match_InvalidZPR128:
4451  return Error(Loc, "invalid element width");
4452  case Match_InvalidZPR_3b8:
4453  return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4454  case Match_InvalidZPR_3b16:
4455  return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4456  case Match_InvalidZPR_3b32:
4457  return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4458  case Match_InvalidZPR_4b16:
4459  return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4460  case Match_InvalidZPR_4b32:
4461  return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4462  case Match_InvalidZPR_4b64:
4463  return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4464  case Match_InvalidSVEPattern:
4465  return Error(Loc, "invalid predicate pattern");
4466  case Match_InvalidSVEPredicateAnyReg:
4467  case Match_InvalidSVEPredicateBReg:
4468  case Match_InvalidSVEPredicateHReg:
4469  case Match_InvalidSVEPredicateSReg:
4470  case Match_InvalidSVEPredicateDReg:
4471  return Error(Loc, "invalid predicate register.");
4472  case Match_InvalidSVEPredicate3bAnyReg:
4473  case Match_InvalidSVEPredicate3bBReg:
4474  case Match_InvalidSVEPredicate3bHReg:
4475  case Match_InvalidSVEPredicate3bSReg:
4476  case Match_InvalidSVEPredicate3bDReg:
4477  return Error(Loc, "restricted predicate has range [0, 7].");
4478  case Match_InvalidSVEExactFPImmOperandHalfOne:
4479  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4480  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4481  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4482  case Match_InvalidSVEExactFPImmOperandZeroOne:
4483  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4484  default:
4485  llvm_unreachable("unexpected error code!");
4486  }
4487 }
4488 
4489 static const char *getSubtargetFeatureName(uint64_t Val);
4490 
4491 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4492  OperandVector &Operands,
4493  MCStreamer &Out,
4494  uint64_t &ErrorInfo,
4495  bool MatchingInlineAsm) {
4496  assert(!Operands.empty() && "Unexpected empty operand list!");
4497  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4498  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4499 
4500  StringRef Tok = Op.getToken();
4501  unsigned NumOperands = Operands.size();
4502 
4503  if (NumOperands == 4 && Tok == "lsl") {
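  // The "lsl" alias is lowered to "ubfm" here, e.g. "lsl w0, w1, #3"
  // becomes "ubfm w0, w1, #29, #28".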
4504  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4505  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4506  if (Op2.isScalarReg() && Op3.isImm()) {
4507  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4508  if (Op3CE) {
4509  uint64_t Op3Val = Op3CE->getValue();
4510  uint64_t NewOp3Val = 0;
4511  uint64_t NewOp4Val = 0;
4512  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4513  Op2.getReg())) {
4514  NewOp3Val = (32 - Op3Val) & 0x1f;
4515  NewOp4Val = 31 - Op3Val;
4516  } else {
4517  NewOp3Val = (64 - Op3Val) & 0x3f;
4518  NewOp4Val = 63 - Op3Val;
4519  }
4520 
4521  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4522  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4523 
4524  Operands[0] = AArch64Operand::CreateToken(
4525  "ubfm", false, Op.getStartLoc(), getContext());
4526  Operands.push_back(AArch64Operand::CreateImm(
4527  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4528  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4529  Op3.getEndLoc(), getContext());
4530  }
4531  }
4532  } else if (NumOperands == 4 && Tok == "bfc") {
4533  // FIXME: Horrible hack to handle BFC->BFM alias.
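  // e.g. "bfc w0, #3, #4" becomes "bfm w0, wzr, #29, #3".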
4534  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4535  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4536  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4537 
4538  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4539  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4540  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4541 
4542  if (LSBCE && WidthCE) {
4543  uint64_t LSB = LSBCE->getValue();
4544  uint64_t Width = WidthCE->getValue();
4545 
4546  uint64_t RegWidth = 0;
4547  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4548  Op1.getReg()))
4549  RegWidth = 64;
4550  else
4551  RegWidth = 32;
4552 
4553  if (LSB >= RegWidth)
4554  return Error(LSBOp.getStartLoc(),
4555  "expected integer in range [0, 31]");
4556  if (Width < 1 || Width > RegWidth)
4557  return Error(WidthOp.getStartLoc(),
4558  "expected integer in range [1, 32]");
4559 
4560  uint64_t ImmR = 0;
4561  if (RegWidth == 32)
4562  ImmR = (32 - LSB) & 0x1f;
4563  else
4564  ImmR = (64 - LSB) & 0x3f;
4565 
4566  uint64_t ImmS = Width - 1;
4567 
4568  if (ImmR != 0 && ImmS >= ImmR)
4569  return Error(WidthOp.getStartLoc(),
4570  "requested insert overflows register");
4571 
4572  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4573  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4574  Operands[0] = AArch64Operand::CreateToken(
4575  "bfm", false, Op.getStartLoc(), getContext());
4576  Operands[2] = AArch64Operand::CreateReg(
4577  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4578  SMLoc(), SMLoc(), getContext());
4579  Operands[3] = AArch64Operand::CreateImm(
4580  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4581  Operands.emplace_back(
4582  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4583  WidthOp.getEndLoc(), getContext()));
4584  }
4585  }
4586  } else if (NumOperands == 5) {
4587  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4588  // UBFIZ -> UBFM aliases.
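  // e.g. "bfi x0, x1, #8, #16" becomes "bfm x0, x1, #56, #15".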
4589  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4590  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4591  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4592  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4593 
4594  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4595  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4596  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4597 
4598  if (Op3CE && Op4CE) {
4599  uint64_t Op3Val = Op3CE->getValue();
4600  uint64_t Op4Val = Op4CE->getValue();
4601 
4602  uint64_t RegWidth = 0;
4603  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4604  Op1.getReg()))
4605  RegWidth = 64;
4606  else
4607  RegWidth = 32;
4608 
4609  if (Op3Val >= RegWidth)
4610  return Error(Op3.getStartLoc(),
4611  "expected integer in range [0, 31]");
4612  if (Op4Val < 1 || Op4Val > RegWidth)
4613  return Error(Op4.getStartLoc(),
4614  "expected integer in range [1, 32]");
4615 
4616  uint64_t NewOp3Val = 0;
4617  if (RegWidth == 32)
4618  NewOp3Val = (32 - Op3Val) & 0x1f;
4619  else
4620  NewOp3Val = (64 - Op3Val) & 0x3f;
4621 
4622  uint64_t NewOp4Val = Op4Val - 1;
4623 
4624  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4625  return Error(Op4.getStartLoc(),
4626  "requested insert overflows register");
4627 
4628  const MCExpr *NewOp3 =
4629  MCConstantExpr::create(NewOp3Val, getContext());
4630  const MCExpr *NewOp4 =
4631  MCConstantExpr::create(NewOp4Val, getContext());
4632  Operands[3] = AArch64Operand::CreateImm(
4633  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4634  Operands[4] = AArch64Operand::CreateImm(
4635  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4636  if (Tok == "bfi")
4637  Operands[0] = AArch64Operand::CreateToken(
4638  "bfm", false, Op.getStartLoc(), getContext());
4639  else if (Tok == "sbfiz")
4640  Operands[0] = AArch64Operand::CreateToken(
4641  "sbfm", false, Op.getStartLoc(), getContext());
4642  else if (Tok == "ubfiz")
4643  Operands[0] = AArch64Operand::CreateToken(
4644  "ubfm", false, Op.getStartLoc(), getContext());
4645  else
4646  llvm_unreachable("No valid mnemonic for alias?");
4647  }
4648  }
4649 
4650  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4651  // UBFX -> UBFM aliases.
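  // e.g. "ubfx w0, w1, #4, #8" becomes "ubfm w0, w1, #4, #11".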
4652  } else if (NumOperands == 5 &&
4653  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4654  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4655  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4656  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4657 
4658  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4659  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4660  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4661 
4662  if (Op3CE && Op4CE) {
4663  uint64_t Op3Val = Op3CE->getValue();
4664  uint64_t Op4Val = Op4CE->getValue();
4665 
4666  uint64_t RegWidth = 0;
4667  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4668  Op1.getReg()))
4669  RegWidth = 64;
4670  else
4671  RegWidth = 32;
4672 
4673  if (Op3Val >= RegWidth)
4674  return Error(Op3.getStartLoc(),
4675  "expected integer in range [0, 31]");
4676  if (Op4Val < 1 || Op4Val > RegWidth)
4677  return Error(Op4.getStartLoc(),
4678  "expected integer in range [1, 32]");
4679 
4680  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4681 
4682  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4683  return Error(Op4.getStartLoc(),
4684  "requested extract overflows register");
4685 
4686  const MCExpr *NewOp4 =
4687  MCConstantExpr::create(NewOp4Val, getContext());
4688  Operands[4] = AArch64Operand::CreateImm(
4689  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4690  if (Tok == "bfxil")
4691  Operands[0] = AArch64Operand::CreateToken(
4692  "bfm", false, Op.getStartLoc(), getContext());
4693  else if (Tok == "sbfx")
4694  Operands[0] = AArch64Operand::CreateToken(
4695  "sbfm", false, Op.getStartLoc(), getContext());
4696  else if (Tok == "ubfx")
4697  Operands[0] = AArch64Operand::CreateToken(
4698  "ubfm", false, Op.getStartLoc(), getContext());
4699  else
4700  llvm_unreachable("No valid mnemonic for alias?");
4701  }
4702  }
4703  }
4704  }
4705 
4706  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4707  // instruction for FP registers correctly in some rare circumstances. Convert
4708  // it to a safe instruction and warn (because silently changing someone's
4709  // assembly is rude).
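  // e.g. "movi.2d v0, #0" is rewritten below as "movi.16b v0, #0".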
4710  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4711  NumOperands == 4 && Tok == "movi") {
4712  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4713  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4714  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4715  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4716  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4717  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4718  if (Suffix.lower() == ".2d" &&
4719  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4720  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4721  " correctly on this CPU, converting to equivalent movi.16b");
4722  // Switch the suffix to .16b.
4723  unsigned Idx = Op1.isToken() ? 1 : 2;
4724  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4725  getContext());
4726  }
4727  }
4728  }
4729 
4730  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4731  // InstAlias can't quite handle this since the reg classes aren't
4732  // subclasses.
4733  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4734  // The source register can be Wn here, but the matcher expects a
4735  // GPR64. Twiddle it here if necessary.
4736  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4737  if (Op.isScalarReg()) {
4738  unsigned Reg = getXRegFromWReg(Op.getReg());
4739  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4740  Op.getStartLoc(), Op.getEndLoc(),
4741  getContext());
4742  }
4743  }
4744  // FIXME: Likewise for sxt[bh] with an Xd dst operand
4745  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4746  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4747  if (Op.isScalarReg() &&
4748  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4749  Op.getReg())) {
4750  // The source register can be Wn here, but the matcher expects a
4751  // GPR64. Twiddle it here if necessary.
4752  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4753  if (Op.isScalarReg()) {
4754  unsigned Reg = getXRegFromWReg(Op.getReg());
4755  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4756  Op.getStartLoc(),
4757  Op.getEndLoc(), getContext());
4758  }
4759  }
4760  }
4761  // FIXME: Likewise for uxt[bh] with an Xd dst operand
4762  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4763  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4764  if (Op.isScalarReg() &&
4765  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4766  Op.getReg())) {
4767  // The destination register can be Xn here, but the matcher expects a
4768  // GPR32. Twiddle it here if necessary.
4769  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4770  if (Op.isScalarReg()) {
4771  unsigned Reg = getWRegFromXReg(Op.getReg());
4772  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4773  Op.getStartLoc(),
4774  Op.getEndLoc(), getContext());
4775  }
4776  }
4777  }
4778 
4779  MCInst Inst;
4780  FeatureBitset MissingFeatures;
4781  // First try to match against the secondary set of tables containing the
4782  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4783  unsigned MatchResult =
4784  MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4785  MatchingInlineAsm, 1);
4786 
4787  // If that fails, try against the alternate table containing long-form NEON:
4788  // "fadd v0.2s, v1.2s, v2.2s"
4789  if (MatchResult != Match_Success) {
4790  // But first, save the short-form match result: we can use it in case the
4791  // long-form match also fails.
4792  auto ShortFormNEONErrorInfo = ErrorInfo;
4793  auto ShortFormNEONMatchResult = MatchResult;
4794  auto ShortFormNEONMissingFeatures = MissingFeatures;
4795 
4796  MatchResult =
4797  MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4798  MatchingInlineAsm, 0);
4799 
4800  // If both matches failed, and the long-form match failed on the mnemonic
4801  // suffix token operand, the short-form match failure is probably more
4802  // relevant: use it instead.
4803  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4804  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4805  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4806  MatchResult = ShortFormNEONMatchResult;
4807  ErrorInfo = ShortFormNEONErrorInfo;
4808  MissingFeatures = ShortFormNEONMissingFeatures;
4809  }
4810  }
4811 
4812  switch (MatchResult) {
4813  case Match_Success: {
4814  // Perform range checking and other semantic validations
4815  SmallVector<SMLoc, 8> OperandLocs;
4816  NumOperands = Operands.size();
4817  for (unsigned i = 1; i < NumOperands; ++i)
4818  OperandLocs.push_back(Operands[i]->getStartLoc());
4819  if (validateInstruction(Inst, IDLoc, OperandLocs))
4820  return true;
4821 
4822  Inst.setLoc(IDLoc);
4823  Out.EmitInstruction(Inst, getSTI());
4824  return false;
4825  }
4826  case Match_MissingFeature: {
4827  assert(MissingFeatures.any() && "Unknown missing feature!");
4828  // Special case the error message for the very common case where only
4829  // a single subtarget feature is missing (e.g. neon).
4830  std::string Msg = "instruction requires:";
4831  for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4832  if (MissingFeatures[i]) {
4833  Msg += " ";
4834  Msg += getSubtargetFeatureName(i);
4835  }
4836  }
4837  return Error(IDLoc, Msg);
4838  }
4839  case Match_MnemonicFail:
4840  return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4841  case Match_InvalidOperand: {
4842  SMLoc ErrorLoc = IDLoc;
4843 
4844  if (ErrorInfo != ~0ULL) {
4845  if (ErrorInfo >= Operands.size())
4846  return Error(IDLoc, "too few operands for instruction",
4847  SMRange(IDLoc, getTok().getLoc()));
4848 
4849  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4850  if (ErrorLoc == SMLoc())
4851  ErrorLoc = IDLoc;
4852  }
4853  // If the match failed on a suffix token operand, tweak the diagnostic
4854  // accordingly.
4855  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4856  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4857  MatchResult = Match_InvalidSuffix;
4858 
4859  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4860  }
4861  case Match_InvalidTiedOperand:
4862  case Match_InvalidMemoryIndexed1:
4863  case Match_InvalidMemoryIndexed2:
4864  case Match_InvalidMemoryIndexed4:
4865  case Match_InvalidMemoryIndexed8:
4866  case Match_InvalidMemoryIndexed16:
4867  case Match_InvalidCondCode:
4868  case Match_AddSubRegExtendSmall:
4869  case Match_AddSubRegExtendLarge:
4870  case Match_AddSubSecondSource:
4871  case Match_LogicalSecondSource:
4872  case Match_AddSubRegShift32:
4873  case Match_AddSubRegShift64:
4874  case Match_InvalidMovImm32Shift:
4875  case Match_InvalidMovImm64Shift:
4876  case Match_InvalidFPImm:
4877  case Match_InvalidMemoryWExtend8:
4878  case Match_InvalidMemoryWExtend16:
4879  case Match_InvalidMemoryWExtend32:
4880  case Match_InvalidMemoryWExtend64:
4881  case Match_InvalidMemoryWExtend128:
4882  case Match_InvalidMemoryXExtend8:
4883  case Match_InvalidMemoryXExtend16:
4884  case Match_InvalidMemoryXExtend32:
4885  case Match_InvalidMemoryXExtend64:
4886  case Match_InvalidMemoryXExtend128:
4887  case Match_InvalidMemoryIndexed1SImm4:
4888  case Match_InvalidMemoryIndexed2SImm4:
4889  case Match_InvalidMemoryIndexed3SImm4:
4890  case Match_InvalidMemoryIndexed4SImm4:
4891  case Match_InvalidMemoryIndexed1SImm6:
4892  case Match_InvalidMemoryIndexed16SImm4:
4893  case Match_InvalidMemoryIndexed4SImm7:
4894  case Match_InvalidMemoryIndexed8SImm7:
4895  case Match_InvalidMemoryIndexed16SImm7:
4896  case Match_InvalidMemoryIndexed8UImm5:
4897  case Match_InvalidMemoryIndexed4UImm5:
4898  case Match_InvalidMemoryIndexed2UImm5:
4899  case Match_InvalidMemoryIndexed1UImm6:
4900  case Match_InvalidMemoryIndexed2UImm6:
4901  case Match_InvalidMemoryIndexed4UImm6:
4902  case Match_InvalidMemoryIndexed8UImm6:
4903  case Match_InvalidMemoryIndexed16UImm6:
4904  case Match_InvalidMemoryIndexedSImm6:
4905  case Match_InvalidMemoryIndexedSImm5:
4906  case Match_InvalidMemoryIndexedSImm8:
4907  case Match_InvalidMemoryIndexedSImm9:
4908  case Match_InvalidMemoryIndexed16SImm9:
4909  case Match_InvalidMemoryIndexed8SImm10:
4910  case Match_InvalidImm0_1:
4911  case Match_InvalidImm0_7:
4912  case Match_InvalidImm0_15:
4913  case Match_InvalidImm0_31:
4914  case Match_InvalidImm0_63:
4915  case Match_InvalidImm0_127:
4916  case Match_InvalidImm0_255:
4917  case Match_InvalidImm0_65535:
4918  case Match_InvalidImm1_8:
4919  case Match_InvalidImm1_16:
4920  case Match_InvalidImm1_32:
4921  case Match_InvalidImm1_64:
4922  case Match_InvalidSVEAddSubImm8:
4923  case Match_InvalidSVEAddSubImm16:
4924  case Match_InvalidSVEAddSubImm32:
4925  case Match_InvalidSVEAddSubImm64:
4926  case Match_InvalidSVECpyImm8:
4927  case Match_InvalidSVECpyImm16:
4928  case Match_InvalidSVECpyImm32:
4929  case Match_InvalidSVECpyImm64:
4930  case Match_InvalidIndexRange1_1:
4931  case Match_InvalidIndexRange0_15:
4932  case Match_InvalidIndexRange0_7:
4933  case Match_InvalidIndexRange0_3:
4934  case Match_InvalidIndexRange0_1:
4935  case Match_InvalidSVEIndexRange0_63:
4936  case Match_InvalidSVEIndexRange0_31:
4937  case Match_InvalidSVEIndexRange0_15:
4938  case Match_InvalidSVEIndexRange0_7:
4939  case Match_InvalidSVEIndexRange0_3:
4940  case Match_InvalidLabel:
4941  case Match_InvalidComplexRotationEven:
4942  case Match_InvalidComplexRotationOdd:
4943  case Match_InvalidGPR64shifted8:
4944  case Match_InvalidGPR64shifted16:
4945  case Match_InvalidGPR64shifted32:
4946  case Match_InvalidGPR64shifted64:
4947  case Match_InvalidGPR64NoXZRshifted8:
4948  case Match_InvalidGPR64NoXZRshifted16:
4949  case Match_InvalidGPR64NoXZRshifted32:
4950  case Match_InvalidGPR64NoXZRshifted64:
4951  case Match_InvalidZPR32UXTW8:
4952  case Match_InvalidZPR32UXTW16:
4953  case Match_InvalidZPR32UXTW32:
4954  case Match_InvalidZPR32UXTW64:
4955  case Match_InvalidZPR32SXTW8:
4956  case Match_InvalidZPR32SXTW16:
4957  case Match_InvalidZPR32SXTW32:
4958  case Match_InvalidZPR32SXTW64:
4959  case Match_InvalidZPR64UXTW8:
4960  case Match_InvalidZPR64SXTW8:
4961  case Match_InvalidZPR64UXTW16:
4962  case Match_InvalidZPR64SXTW16:
4963  case Match_InvalidZPR64UXTW32:
4964  case Match_InvalidZPR64SXTW32:
4965  case Match_InvalidZPR64UXTW64:
4966  case Match_InvalidZPR64SXTW64:
4967  case Match_InvalidZPR32LSL8:
4968  case Match_InvalidZPR32LSL16:
4969  case Match_InvalidZPR32LSL32:
4970  case Match_InvalidZPR32LSL64:
4971  case Match_InvalidZPR64LSL8:
4972  case Match_InvalidZPR64LSL16:
4973  case Match_InvalidZPR64LSL32:
4974  case Match_InvalidZPR64LSL64:
4975  case Match_InvalidZPR0:
4976  case Match_InvalidZPR8:
4977  case Match_InvalidZPR16:
4978  case Match_InvalidZPR32:
4979  case Match_InvalidZPR64:
4980  case Match_InvalidZPR128:
4981  case Match_InvalidZPR_3b8:
4982  case Match_InvalidZPR_3b16:
4983  case Match_InvalidZPR_3b32:
4984  case Match_InvalidZPR_4b16:
4985  case Match_InvalidZPR_4b32:
4986  case Match_InvalidZPR_4b64:
4987  case Match_InvalidSVEPredicateAnyReg:
4988  case Match_InvalidSVEPattern:
4989  case Match_InvalidSVEPredicateBReg:
4990  case Match_InvalidSVEPredicateHReg:
4991  case Match_InvalidSVEPredicateSReg:
4992  case Match_InvalidSVEPredicateDReg:
4993  case Match_InvalidSVEPredicate3bAnyReg:
4994  case Match_InvalidSVEPredicate3bBReg:
4995  case Match_InvalidSVEPredicate3bHReg:
4996  case Match_InvalidSVEPredicate3bSReg:
4997  case Match_InvalidSVEPredicate3bDReg:
4998  case Match_InvalidSVEExactFPImmOperandHalfOne:
4999  case Match_InvalidSVEExactFPImmOperandHalfTwo:
5000  case Match_InvalidSVEExactFPImmOperandZeroOne:
5001  case Match_MSR:
5002  case Match_MRS: {
5003  if (ErrorInfo >= Operands.size())
5004  return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5005  // Any time we get here, there's nothing fancy to do. Just get the
5006  // operand SMLoc and display the diagnostic.
5007  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5008  if (ErrorLoc == SMLoc())
5009  ErrorLoc = IDLoc;
5010  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5011  }
5012  }
5013 
5014  llvm_unreachable("Implement any new match types added!");
5015 }
5016 
5017 /// ParseDirective parses the AArch64-specific directives.
5018 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5019  const MCObjectFileInfo::Environment Format =
5020  getContext().getObjectFileInfo()->getObjectFileType();
5021  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5022 
5023  StringRef IDVal = DirectiveID.getIdentifier();
5024  SMLoc Loc = DirectiveID.getLoc();
5025  if (IDVal == ".arch")
5026  parseDirectiveArch(Loc);
5027  else if (IDVal == ".cpu")
5028  parseDirectiveCPU(Loc);
5029  else if (IDVal == ".tlsdesccall")
5030  parseDirectiveTLSDescCall(Loc);
5031  else if (IDVal == ".ltorg" || IDVal == ".pool")
5032  parseDirectiveLtorg(Loc);
5033  else if (IDVal == ".unreq")
5034  parseDirectiveUnreq(Loc);
5035  else if (IDVal == ".inst")
5036  parseDirectiveInst(Loc);
5037  else if (IDVal == ".cfi_negate_ra_state")
5038  parseDirectiveCFINegateRAState();
5039  else if (IDVal == ".cfi_b_key_frame")
5040  parseDirectiveCFIBKeyFrame();
5041  else if (IDVal == ".arch_extension")
5042  parseDirectiveArchExtension(Loc);
5043  else if (IsMachO) {
5044  if (IDVal == MCLOHDirectiveName())
5045  parseDirectiveLOH(IDVal, Loc);
5046  else
5047  return true;
5048  } else
5049  return true;
5050  return false;
5051 }
5052 
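/// Expand the deprecated "crypto" / "nocrypto" extension names into the
/// concrete features they historically implied: sha2 and aes for ARMv8.3-A
/// and earlier (including 'generic'), plus sm4 and sha3 from ARMv8.4-A on.
/// For example, a hypothetical ".arch armv8.4-a+crypto" requests sm4, sha3,
/// sha2 and aes.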
5053 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5054  SmallVector<StringRef, 4> &RequestedExtensions) {
5055  const bool NoCrypto =
5056  (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5057  "nocrypto") != std::end(RequestedExtensions));
5058  const bool Crypto =
5059  (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5060  "crypto") != std::end(RequestedExtensions));
5061 
5062  if (!NoCrypto && Crypto) {
5063  switch (ArchKind) {
5064  default:
5065  // Map 'generic' (and others) to sha2 and aes, because
5066  // that was the traditional meaning of crypto.
5067  case AArch64::ArchKind::ARMV8_1A:
5068  case AArch64::ArchKind::ARMV8_2A:
5069  case AArch64::ArchKind::ARMV8_3A:
5070  RequestedExtensions.push_back("sha2");
5071  RequestedExtensions.push_back("aes");
5072  break;
5073  case AArch64::ArchKind::ARMV8_4A:
5074  case AArch64::ArchKind::ARMV8_5A:
5075  RequestedExtensions.push_back("sm4");
5076  RequestedExtensions.push_back("sha3");
5077  RequestedExtensions.push_back("sha2");
5078  RequestedExtensions.push_back("aes");
5079  break;
5080  }
5081  } else if (NoCrypto) {
5082  switch (ArchKind) {
5083  default:
5084  // Map 'generic' (and others) to nosha2 and noaes, because
5085  // sha2 and aes were the traditional meaning of crypto.
5086  case AArch64::ArchKind::ARMV8_1A:
5087  case AArch64::ArchKind::ARMV8_2A:
5088  case AArch64::ArchKind::ARMV8_3A:
5089  RequestedExtensions.push_back("nosha2");
5090  RequestedExtensions.push_back("noaes");
5091  break;
5092  case AArch64::ArchKind::ARMV8_4A:
5093  case AArch64::ArchKind::ARMV8_5A:
5094  RequestedExtensions.push_back("nosm4");
5095  RequestedExtensions.push_back("nosha3");
5096  RequestedExtensions.push_back("nosha2");
5097  RequestedExtensions.push_back("noaes");
5098  break;
5099  }
5100  }
5101 }
5102 
5103 /// parseDirectiveArch
5104 /// ::= .arch token
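/// For example, a hypothetical ".arch armv8.2-a+crypto" selects the
/// ARMv8.2-A feature set and then applies the listed extensions on top.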
5105 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5106  SMLoc ArchLoc = getLoc();
5107 
5108  StringRef Arch, ExtensionString;
5109  std::tie(Arch, ExtensionString) =
5110  getParser().parseStringToEndOfStatement().trim().split('+');
5111 
5112  AArch64::ArchKind ID = AArch64::parseArch(Arch);
5113  if (ID == AArch64::ArchKind::INVALID)
5114  return Error(ArchLoc, "unknown arch name");
5115 
5116  if (parseToken(AsmToken::EndOfStatement))
5117  return true;
5118 
5119  // Get the architecture and extension features.
5120  std::vector<StringRef> AArch64Features;
5121  AArch64::getArchFeatures(ID, AArch64Features);
5122  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5123  AArch64Features);
5124 
5125  MCSubtargetInfo &STI = copySTI();
5126  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5127  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5128 
5129  SmallVector<StringRef, 4> RequestedExtensions;
5130  if (!ExtensionString.empty())
5131  ExtensionString.split(RequestedExtensions, '+');
5132 
5133  ExpandCryptoAEK(ID, RequestedExtensions);
5134 
5135  FeatureBitset Features = STI.getFeatureBits();
5136  for (auto Name : RequestedExtensions) {
5137  bool EnableFeature = true;
5138 
5139  if (Name.startswith_lower("no")) {
5140  EnableFeature = false;
5141  Name = Name.substr(2);
5142  }
5143 
5144  for (const auto &Extension : ExtensionMap) {
5145  if (Extension.Name != Name)
5146  continue;
5147 
5148  if (Extension.Features.none())
5149  report_fatal_error("unsupported architectural extension: " + Name);
5150 
5151  FeatureBitset ToggleFeatures = EnableFeature
5152  ? (~Features & Extension.Features)
5153  : ( Features & Extension.Features);
5154  FeatureBitset Features =
5155  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5156  setAvailableFeatures(Features);
5157  break;
5158  }
5159  }
5160  return false;
5161 }
5162 
5163 /// parseDirectiveArchExtension
5164 /// ::= .arch_extension [no]feature
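/// For example (illustrative): ".arch_extension crypto" enables the crypto
/// feature bits, and ".arch_extension nocrypto" disables them again.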
5165 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5166  MCAsmParser &Parser = getParser();
5167 
5168  if (getLexer().isNot(AsmToken::Identifier))
5169  return Error(getLexer().getLoc(), "expected architecture extension name");
5170 
5171  const AsmToken &Tok = Parser.getTok();
5172  StringRef Name = Tok.getString();
5173  SMLoc ExtLoc = Tok.getLoc();
5174  Lex();
5175 
5176  if (parseToken(AsmToken::EndOfStatement,
5177  "unexpected token in '.arch_extension' directive"))
5178  return true;
5179 
5180  bool EnableFeature = true;
5181  if (Name.startswith_lower("no")) {
5182  EnableFeature = false;
5183  Name = Name.substr(2);
5184  }
5185 
5186  MCSubtargetInfo &STI = copySTI();
5187  FeatureBitset Features = STI.getFeatureBits();
5188  for (const auto &Extension : ExtensionMap) {
5189  if (Extension.Name != Name)
5190  continue;
5191 
5192  if (Extension.Features.none())
5193  return Error(ExtLoc, "unsupported architectural extension: " + Name);
5194 
5195  FeatureBitset ToggleFeatures = EnableFeature
5196  ? (~Features & Extension.Features)
5197  : (Features & Extension.Features);
5198  FeatureBitset Features =
5199  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5200  setAvailableFeatures(Features);
5201  return false;
5202  }
5203 
5204  return Error(ExtLoc, "unknown architectural extension: " + Name);
5205 }
5206 
5207 static SMLoc incrementLoc(SMLoc L, int Offset) {
5208  return SMLoc::getFromPointer(L.getPointer() + Offset);
5209 }
5210 
5211 /// parseDirectiveCPU
5212 /// ::= .cpu id
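/// For example, a hypothetical ".cpu cortex-a57+crypto" selects the
/// cortex-a57 feature set and then toggles the listed extensions.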
5213 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5214  SMLoc CurLoc = getLoc();
5215 
5216  StringRef CPU, ExtensionString;
5217  std::tie(CPU, ExtensionString) =
5218  getParser().parseStringToEndOfStatement().trim().split('+');
5219 
5220  if (parseToken(AsmToken::EndOfStatement))
5221  return true;
5222 
5223  SmallVector<StringRef, 4> RequestedExtensions;
5224  if (!ExtensionString.empty())
5225  ExtensionString.split(RequestedExtensions, '+');
5226 
5227  // FIXME: This is using tablegen data, but should be moved to
5228  // ARMTargetParser once that is tablegen'ed.
5229  if (!getSTI().isCPUStringValid(CPU)) {
5230  Error(CurLoc, "unknown CPU name");
5231  return false;
5232  }
5233 
5234  MCSubtargetInfo &STI = copySTI();
5235  STI.setDefaultFeatures(CPU, "");
5236  CurLoc = incrementLoc(CurLoc, CPU.size());
5237 
5238  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5239 
5240  FeatureBitset Features = STI.getFeatureBits();
5241  for (auto Name : RequestedExtensions) {
5242  // Advance source location past '+'.
5243  CurLoc = incrementLoc(CurLoc, 1);
5244 
5245  bool EnableFeature = true;
5246 
5247  if (Name.startswith_lower("no")) {
5248  EnableFeature = false;
5249  Name = Name.substr(2);
5250  }
5251 
5252  bool FoundExtension = false;
5253  for (const auto &Extension : ExtensionMap) {
5254  if (Extension.Name != Name)
5255  continue;
5256 
5257  if (Extension.Features.none())
5258  report_fatal_error("unsupported architectural extension: " + Name);
5259 
5260  FeatureBitset ToggleFeatures = EnableFeature
5261  ? (~Features & Extension.Features)
5262  : ( Features & Extension.Features);
5263  FeatureBitset Features =
5264  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5265  setAvailableFeatures(Features);
5266  FoundExtension = true;
5267 
5268  break;
5269  }
5270 
5271  if (!FoundExtension)
5272  Error(CurLoc, "unsupported architectural extension");
5273 
5274  CurLoc = incrementLoc(CurLoc, Name.size());
5275  }
5276  return false;
5277 }
5278 
5279 /// parseDirectiveInst
5280 /// ::= .inst opcode [, ...]
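/// For example (illustrative): ".inst 0xd503201f" emits the 32-bit NOP
/// encoding; several comma-separated constant expressions may be given.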
5281 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5282  if (getLexer().is(AsmToken::EndOfStatement))
5283  return Error(Loc, "expected expression following '.inst' directive");
5284 
5285  auto parseOp = [&]() -> bool {
5286  SMLoc L = getLoc();
5287  const MCExpr *Expr;
5288  if (check(getParser().parseExpression(Expr), L, "expected expression"))
5289  return true;
5290  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5291  if (check(!Value, L, "expected constant expression"))
5292  return true;
5293  getTargetStreamer().emitInst(Value->getValue());
5294  return false;
5295  };
5296 
5297  if (parseMany(parseOp))
5298  return addErrorSuffix(" in '.inst' directive");
5299  return false;
5300 }
5301 
5302 // parseDirectiveTLSDescCall:
5303 // ::= .tlsdesccall symbol
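// The directive marks the following call for TLS descriptor relaxation by
// the linker; an illustrative use within the usual sequence:
//   .tlsdesccall var
//   blr x1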
5304 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5305  StringRef Name;
5306  if (check(getParser().parseIdentifier(Name), L,
5307  "expected symbol after directive") ||
5308  parseToken(AsmToken::EndOfStatement))
5309  return true;
5310 
5311  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5312  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5313  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5314 
5315  MCInst Inst;
5316  Inst.setOpcode(AArch64::TLSDESCCALL);
5317  Inst.addOperand(MCOperand::createExpr(Expr));
5318 
5319  getParser().getStreamer().EmitInstruction(Inst, getSTI());
5320  return false;
5321 }
5322 
5323 /// ::= .loh <lohName | lohId> label1, ..., labelN
5324 /// The number of arguments depends on the loh identifier.
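/// For example (illustrative): ".loh AdrpAdd Ltmp0, Ltmp1" ties an adrp/add
/// pair together for the linker; the AdrpAdd kind takes two label arguments.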
5325 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5326  MCLOHType Kind;
5327  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5328  if (getParser().getTok().isNot(AsmToken::Integer))
5329  return TokError("expected an identifier or a number in directive");
5330  // We successfully got a numeric value for the identifier.
5331  // Check that it is valid.
5332  int64_t Id = getParser().getTok().getIntVal();
5333  if (Id <= -1U && !isValidMCLOHType(Id))
5334  return TokError("invalid numeric identifier in directive");
5335  Kind = (MCLOHType)Id;
5336  } else {
5337  StringRef Name = getTok().getIdentifier();
5338  // We successfully parsed an identifier.
5339  // Check that it is a recognized one.
5340  int Id = MCLOHNameToId(Name);
5341 
5342  if (Id == -1)
5343  return TokError("invalid identifier in directive");
5344  Kind = (MCLOHType)Id;
5345  }
5346  // Consume the identifier.
5347  Lex();
5348  // Get the number of arguments of this LOH.
5349  int NbArgs = MCLOHIdToNbArgs(Kind);
5350 
5351  assert(NbArgs != -1 && "Invalid number of arguments");
5352 
5354  for (int Idx = 0; Idx < NbArgs; ++Idx) {
5355  StringRef Name;
5356  if (getParser().parseIdentifier(Name))
5357  return TokError("expected identifier in directive");
5358  Args.push_back(getContext().getOrCreateSymbol(Name));
5359 
5360  if (Idx + 1 == NbArgs)
5361  break;
5362  if (parseToken(AsmToken::Comma,
5363  "unexpected token in '" + Twine(IDVal) + "' directive"))
5364  return true;
5365  }
5366  if (parseToken(AsmToken::EndOfStatement,
5367  "unexpected token in '" + Twine(IDVal) + "' directive"))
5368  return true;
5369 
5370  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5371  return false;
5372 }
5373 
5374 /// parseDirectiveLtorg
5375 /// ::= .ltorg | .pool
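/// Both spellings emit the constant pool accumulated so far, e.g. literals
/// created by a hypothetical "ldr x0, =0x1234567890abcdef", at this point.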
5376 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5377  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5378  return true;
5379  getTargetStreamer().emitCurrentConstantPool();
5380  return false;
5381 }
5382 
5383 /// parseDirectiveReq
5384 /// ::= name .req registername
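/// For example (illustrative): "fp .req x29" lets "fp" stand for x29 until
/// a matching ".unreq fp".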
5385 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5386  MCAsmParser &Parser = getParser();
5387  Parser.Lex(); // Eat the '.req' token.
5388  SMLoc SRegLoc = getLoc();
5389  RegKind RegisterKind = RegKind::Scalar;
5390  unsigned RegNum;
5391  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5392 
5393  if (ParseRes != MatchOperand_Success) {
5394  StringRef Kind;
5395  RegisterKind = RegKind::NeonVector;
5396  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5397 
5398  if (ParseRes == MatchOperand_ParseFail)
5399  return true;
5400 
5401  if (ParseRes == MatchOperand_Success && !Kind.empty())
5402  return Error(SRegLoc, "vector register without type specifier expected");
5403  }
5404 
5405  if (ParseRes != MatchOperand_Success) {
5406  StringRef Kind;
5407  RegisterKind = RegKind::SVEDataVector;
5408  ParseRes =
5409  tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5410 
5411  if (ParseRes == MatchOperand_ParseFail)
5412  return true;
5413 
5414  if (ParseRes == MatchOperand_Success && !Kind.empty())
5415  return Error(SRegLoc,
5416  "sve vector register without type specifier expected");
5417  }
5418 
5419  if (ParseRes != MatchOperand_Success) {
5420