LLVM  9.0.0svn
AArch64AsmParser.cpp
Go to the documentation of this file.
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
14 #include "AArch64InstrInfo.h"
15 #include "Utils/AArch64BaseInfo.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/MC/MCContext.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/MC/MCStreamer.h"
39 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCValue.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/SMLoc.h"
51 #include <cassert>
52 #include <cctype>
53 #include <cstdint>
54 #include <cstdio>
55 #include <string>
56 #include <tuple>
57 #include <utility>
58 #include <vector>
59 
60 using namespace llvm;
61 
62 namespace {
63 
64 enum class RegKind {
65  Scalar,
66  NeonVector,
67  SVEDataVector,
68  SVEPredicateVector
69 };
70 
72  EqualsReg,
73  EqualsSuperReg,
74  EqualsSubReg
75 };
76 
77 class AArch64AsmParser : public MCTargetAsmParser {
78 private:
79  StringRef Mnemonic; ///< Instruction mnemonic.
80 
81  // Map of register aliases registered via the .req directive.
83 
  // Records information about a previously-parsed MOVPRFX instruction so the
  // instruction that follows it can be checked for compatibility (matching
  // destination register, governing predicate, and element size).
  class PrefixInfo {
  public:
    // Inspect \p Inst and, if it is one of the MOVPRFX opcodes, capture the
    // operands relevant for validation. Any other opcode yields an inactive
    // (default-constructed) prefix.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated movprfx: only the destination register is tracked.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated movprfx: the predicate register is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated movprfx: the predicate register is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // Element size is only meaningful for a predicated movprfx.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    // Governing predicate register; only valid when predicated.
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
144 
145  AArch64TargetStreamer &getTargetStreamer() {
146  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
147  return static_cast<AArch64TargetStreamer &>(TS);
148  }
149 
150  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
151 
152  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
153  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
154  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
155  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
156  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
157  bool parseRegister(OperandVector &Operands);
158  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
159  bool parseNeonVectorList(OperandVector &Operands);
160  bool parseOptionalMulOperand(OperandVector &Operands);
161  bool parseOperand(OperandVector &Operands, bool isCondCode,
162  bool invertCondCode);
163 
164  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
165  OperandVector &Operands);
166 
167  bool parseDirectiveArch(SMLoc L);
168  bool parseDirectiveArchExtension(SMLoc L);
169  bool parseDirectiveCPU(SMLoc L);
170  bool parseDirectiveInst(SMLoc L);
171 
172  bool parseDirectiveTLSDescCall(SMLoc L);
173 
174  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
175  bool parseDirectiveLtorg(SMLoc L);
176 
177  bool parseDirectiveReq(StringRef Name, SMLoc L);
178  bool parseDirectiveUnreq(SMLoc L);
179  bool parseDirectiveCFINegateRAState();
180  bool parseDirectiveCFIBKeyFrame();
181 
182  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
184  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
185  OperandVector &Operands, MCStreamer &Out,
186  uint64_t &ErrorInfo,
187  bool MatchingInlineAsm) override;
188 /// @name Auto-generated Match Functions
189 /// {
190 
191 #define GET_ASSEMBLER_HEADER
192 #include "AArch64GenAsmMatcher.inc"
193 
194  /// }
195 
196  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
197  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
198  RegKind MatchKind);
199  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
200  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
201  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
202  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
203  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
204  template <bool IsSVEPrefetch = false>
205  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
206  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
207  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
208  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
209  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
210  template<bool AddFPZeroAsLiteral>
211  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
212  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
213  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
214  bool tryParseNeonVectorRegister(OperandVector &Operands);
215  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
216  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
217  template <bool ParseShiftExtend,
218  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
219  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
220  template <bool ParseShiftExtend, bool ParseSuffix>
221  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
222  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
223  template <RegKind VectorKind>
224  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
225  bool ExpectMatch = false);
226  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
227 
228 public:
229  enum AArch64MatchResultTy {
230  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
231 #define GET_OPERAND_DIAGNOSTIC_TYPES
232 #include "AArch64GenAsmMatcher.inc"
233  };
234  bool IsILP32;
235 
236  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
237  const MCInstrInfo &MII, const MCTargetOptions &Options)
238  : MCTargetAsmParser(Options, STI, MII) {
239  IsILP32 = Options.getABIName() == "ilp32";
241  MCStreamer &S = getParser().getStreamer();
242  if (S.getTargetStreamer() == nullptr)
243  new AArch64TargetStreamer(S);
244 
245  // Alias .hword/.word/.[dx]word to the target-independent
246  // .2byte/.4byte/.8byte directives as they have the same form and
247  // semantics:
248  /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
249  Parser.addAliasForDirective(".hword", ".2byte");
250  Parser.addAliasForDirective(".word", ".4byte");
251  Parser.addAliasForDirective(".dword", ".8byte");
252  Parser.addAliasForDirective(".xword", ".8byte");
253 
254  // Initialize the set of available features.
255  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
256  }
257 
258  bool regsEqual(const MCParsedAsmOperand &Op1,
259  const MCParsedAsmOperand &Op2) const override;
260  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
261  SMLoc NameLoc, OperandVector &Operands) override;
262  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
263  bool ParseDirective(AsmToken DirectiveID) override;
264  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
265  unsigned Kind) override;
266 
267  static bool classifySymbolRef(const MCExpr *Expr,
268  AArch64MCExpr::VariantKind &ELFRefKind,
269  MCSymbolRefExpr::VariantKind &DarwinRefKind,
270  int64_t &Addend);
271 };
272 
273 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
274 /// instruction.
275 class AArch64Operand : public MCParsedAsmOperand {
276 private:
277  enum KindTy {
278  k_Immediate,
279  k_ShiftedImm,
280  k_CondCode,
281  k_Register,
282  k_VectorList,
283  k_VectorIndex,
284  k_Token,
285  k_SysReg,
286  k_SysCR,
287  k_Prefetch,
288  k_ShiftExtend,
289  k_FPImm,
290  k_Barrier,
291  k_PSBHint,
292  k_BTIHint,
293  } Kind;
294 
295  SMLoc StartLoc, EndLoc;
296 
297  struct TokOp {
298  const char *Data;
299  unsigned Length;
300  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
301  };
302 
303  // Separate shift/extend operand.
304  struct ShiftExtendOp {
306  unsigned Amount;
307  bool HasExplicitAmount;
308  };
309 
310  struct RegOp {
311  unsigned RegNum;
312  RegKind Kind;
313  int ElementWidth;
314 
315  // The register may be allowed as a different register class,
316  // e.g. for GPR64as32 or GPR32as64.
317  RegConstraintEqualityTy EqualityTy;
318 
319  // In some cases the shift/extend needs to be explicitly parsed together
320  // with the register, rather than as a separate operand. This is needed
321  // for addressing modes where the instruction as a whole dictates the
322  // scaling/extend, rather than specific bits in the instruction.
323  // By parsing them as a single operand, we avoid the need to pass an
324  // extra operand in all CodeGen patterns (because all operands need to
325  // have an associated value), and we avoid the need to update TableGen to
326  // accept operands that have no associated bits in the instruction.
327  //
328  // An added benefit of parsing them together is that the assembler
329  // can give a sensible diagnostic if the scaling is not correct.
330  //
331  // The default is 'lsl #0' (HasExplicitAmount = false) if no
332  // ShiftExtend is specified.
333  ShiftExtendOp ShiftExtend;
334  };
335 
336  struct VectorListOp {
337  unsigned RegNum;
338  unsigned Count;
339  unsigned NumElements;
340  unsigned ElementWidth;
342  };
343 
344  struct VectorIndexOp {
345  unsigned Val;
346  };
347 
348  struct ImmOp {
349  const MCExpr *Val;
350  };
351 
352  struct ShiftedImmOp {
353  const MCExpr *Val;
354  unsigned ShiftAmount;
355  };
356 
357  struct CondCodeOp {
358  AArch64CC::CondCode Code;
359  };
360 
361  struct FPImmOp {
362  uint64_t Val; // APFloat value bitcasted to uint64_t.
363  bool IsExact; // describes whether parsed value was exact.
364  };
365 
366  struct BarrierOp {
367  const char *Data;
368  unsigned Length;
369  unsigned Val; // Not the enum since not all values have names.
370  };
371 
372  struct SysRegOp {
373  const char *Data;
374  unsigned Length;
375  uint32_t MRSReg;
376  uint32_t MSRReg;
377  uint32_t PStateField;
378  };
379 
380  struct SysCRImmOp {
381  unsigned Val;
382  };
383 
384  struct PrefetchOp {
385  const char *Data;
386  unsigned Length;
387  unsigned Val;
388  };
389 
390  struct PSBHintOp {
391  const char *Data;
392  unsigned Length;
393  unsigned Val;
394  };
395 
396  struct BTIHintOp {
397  const char *Data;
398  unsigned Length;
399  unsigned Val;
400  };
401 
402  struct ExtendOp {
403  unsigned Val;
404  };
405 
406  union {
407  struct TokOp Tok;
408  struct RegOp Reg;
409  struct VectorListOp VectorList;
410  struct VectorIndexOp VectorIndex;
411  struct ImmOp Imm;
412  struct ShiftedImmOp ShiftedImm;
413  struct CondCodeOp CondCode;
414  struct FPImmOp FPImm;
415  struct BarrierOp Barrier;
416  struct SysRegOp SysReg;
417  struct SysCRImmOp SysCRImm;
418  struct PrefetchOp Prefetch;
419  struct PSBHintOp PSBHint;
420  struct BTIHintOp BTIHint;
421  struct ShiftExtendOp ShiftExtend;
422  };
423 
424  // Keep the MCContext around as the MCExprs may need manipulated during
425  // the add<>Operands() calls.
426  MCContext &Ctx;
427 
428 public:
429  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
430 
  // Copy constructor. The operand payload is a union, so only the member
  // selected by \p o.Kind may legally be read and copied.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
483 
484  /// getStartLoc - Get the location of the first token of this operand.
485  SMLoc getStartLoc() const override { return StartLoc; }
486  /// getEndLoc - Get the location of the last token of this operand.
487  SMLoc getEndLoc() const override { return EndLoc; }
488 
489  StringRef getToken() const {
490  assert(Kind == k_Token && "Invalid access!");
491  return StringRef(Tok.Data, Tok.Length);
492  }
493 
494  bool isTokenSuffix() const {
495  assert(Kind == k_Token && "Invalid access!");
496  return Tok.IsSuffix;
497  }
498 
499  const MCExpr *getImm() const {
500  assert(Kind == k_Immediate && "Invalid access!");
501  return Imm.Val;
502  }
503 
504  const MCExpr *getShiftedImmVal() const {
505  assert(Kind == k_ShiftedImm && "Invalid access!");
506  return ShiftedImm.Val;
507  }
508 
509  unsigned getShiftedImmShift() const {
510  assert(Kind == k_ShiftedImm && "Invalid access!");
511  return ShiftedImm.ShiftAmount;
512  }
513 
515  assert(Kind == k_CondCode && "Invalid access!");
516  return CondCode.Code;
517  }
518 
519  APFloat getFPImm() const {
520  assert (Kind == k_FPImm && "Invalid access!");
521  return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
522  }
523 
524  bool getFPImmIsExact() const {
525  assert (Kind == k_FPImm && "Invalid access!");
526  return FPImm.IsExact;
527  }
528 
529  unsigned getBarrier() const {
530  assert(Kind == k_Barrier && "Invalid access!");
531  return Barrier.Val;
532  }
533 
534  StringRef getBarrierName() const {
535  assert(Kind == k_Barrier && "Invalid access!");
536  return StringRef(Barrier.Data, Barrier.Length);
537  }
538 
539  unsigned getReg() const override {
540  assert(Kind == k_Register && "Invalid access!");
541  return Reg.RegNum;
542  }
543 
544  RegConstraintEqualityTy getRegEqualityTy() const {
545  assert(Kind == k_Register && "Invalid access!");
546  return Reg.EqualityTy;
547  }
548 
549  unsigned getVectorListStart() const {
550  assert(Kind == k_VectorList && "Invalid access!");
551  return VectorList.RegNum;
552  }
553 
554  unsigned getVectorListCount() const {
555  assert(Kind == k_VectorList && "Invalid access!");
556  return VectorList.Count;
557  }
558 
559  unsigned getVectorIndex() const {
560  assert(Kind == k_VectorIndex && "Invalid access!");
561  return VectorIndex.Val;
562  }
563 
564  StringRef getSysReg() const {
565  assert(Kind == k_SysReg && "Invalid access!");
566  return StringRef(SysReg.Data, SysReg.Length);
567  }
568 
569  unsigned getSysCR() const {
570  assert(Kind == k_SysCR && "Invalid access!");
571  return SysCRImm.Val;
572  }
573 
574  unsigned getPrefetch() const {
575  assert(Kind == k_Prefetch && "Invalid access!");
576  return Prefetch.Val;
577  }
578 
579  unsigned getPSBHint() const {
580  assert(Kind == k_PSBHint && "Invalid access!");
581  return PSBHint.Val;
582  }
583 
584  StringRef getPSBHintName() const {
585  assert(Kind == k_PSBHint && "Invalid access!");
586  return StringRef(PSBHint.Data, PSBHint.Length);
587  }
588 
589  unsigned getBTIHint() const {
590  assert(Kind == k_BTIHint && "Invalid access!");
591  return BTIHint.Val;
592  }
593 
594  StringRef getBTIHintName() const {
595  assert(Kind == k_BTIHint && "Invalid access!");
596  return StringRef(BTIHint.Data, BTIHint.Length);
597  }
598 
599  StringRef getPrefetchName() const {
600  assert(Kind == k_Prefetch && "Invalid access!");
601  return StringRef(Prefetch.Data, Prefetch.Length);
602  }
603 
604  AArch64_AM::ShiftExtendType getShiftExtendType() const {
605  if (Kind == k_ShiftExtend)
606  return ShiftExtend.Type;
607  if (Kind == k_Register)
608  return Reg.ShiftExtend.Type;
609  llvm_unreachable("Invalid access!");
610  }
611 
612  unsigned getShiftExtendAmount() const {
613  if (Kind == k_ShiftExtend)
614  return ShiftExtend.Amount;
615  if (Kind == k_Register)
616  return Reg.ShiftExtend.Amount;
617  llvm_unreachable("Invalid access!");
618  }
619 
620  bool hasShiftExtendAmount() const {
621  if (Kind == k_ShiftExtend)
622  return ShiftExtend.HasExplicitAmount;
623  if (Kind == k_Register)
624  return Reg.ShiftExtend.HasExplicitAmount;
625  llvm_unreachable("Invalid access!");
626  }
627 
628  bool isImm() const override { return Kind == k_Immediate; }
629  bool isMem() const override { return false; }
630 
631  bool isUImm6() const {
632  if (!isImm())
633  return false;
634  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
635  if (!MCE)
636  return false;
637  int64_t Val = MCE->getValue();
638  return (Val >= 0 && Val < 64);
639  }
640 
641  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
642 
643  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
644  return isImmScaled<Bits, Scale>(true);
645  }
646 
647  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
648  return isImmScaled<Bits, Scale>(false);
649  }
650 
651  template <int Bits, int Scale>
652  DiagnosticPredicate isImmScaled(bool Signed) const {
653  if (!isImm())
655 
656  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
657  if (!MCE)
659 
660  int64_t MinVal, MaxVal;
661  if (Signed) {
662  int64_t Shift = Bits - 1;
663  MinVal = (int64_t(1) << Shift) * -Scale;
664  MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
665  } else {
666  MinVal = 0;
667  MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
668  }
669 
670  int64_t Val = MCE->getValue();
671  if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
673 
675  }
676 
677  DiagnosticPredicate isSVEPattern() const {
678  if (!isImm())
680  auto *MCE = dyn_cast<MCConstantExpr>(getImm());
681  if (!MCE)
683  int64_t Val = MCE->getValue();
684  if (Val >= 0 && Val < 32)
687  }
688 
  // Returns true if \p Expr is a symbolic expression that may be used as a
  // 12-bit unsigned offset (e.g. the :lo12: low-page-offset relocations).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
723 
724  template <int Scale> bool isUImm12Offset() const {
725  if (!isImm())
726  return false;
727 
728  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
729  if (!MCE)
730  return isSymbolicUImm12Offset(getImm());
731 
732  int64_t Val = MCE->getValue();
733  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
734  }
735 
736  template <int N, int M>
737  bool isImmInRange() const {
738  if (!isImm())
739  return false;
740  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
741  if (!MCE)
742  return false;
743  int64_t Val = MCE->getValue();
744  return (Val >= N && Val <= M);
745  }
746 
747  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
748  // a logical immediate can always be represented when inverted.
749  template <typename T>
750  bool isLogicalImm() const {
751  if (!isImm())
752  return false;
753  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
754  if (!MCE)
755  return false;
756 
757  int64_t Val = MCE->getValue();
758  int64_t SVal = typename std::make_signed<T>::type(Val);
759  int64_t UVal = typename std::make_unsigned<T>::type(Val);
760  if (Val != SVal && Val != UVal)
761  return false;
762 
763  return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
764  }
765 
766  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
767 
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already in shifted form with the requested shift amount.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // Plain immediate: report it as shifted only when the low Width bits
        // are zero (i.e. the shift loses no information) and it is non-zero.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Not a constant immediate in either form.
    return {};
  }
788 
  // True iff this operand may be used as the immediate of an ADD/SUB
  // (immediate) instruction: a 12-bit unsigned value optionally shifted left
  // by 12, or a symbolic low-12-bits relocation.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic operand: accept only the relocation kinds that produce a
    // low-12-bits (page-offset style) value.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
833 
834  bool isAddSubImmNeg() const {
835  if (!isShiftedImm() && !isImm())
836  return false;
837 
838  // Otherwise it should be a real negative immediate in range.
839  if (auto ShiftedVal = getShiftedVal<12>())
840  return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
841 
842  return false;
843  }
844 
845  // Signed value in the range -128 to +127. For element widths of
846  // 16 bits or higher it may also be a signed multiple of 256 in the
847  // range -32768 to +32512.
848  // For element-width of 8 bits a range of -128 to 255 is accepted,
849  // since a copy of a byte can be either signed/unsigned.
850  template <typename T>
852  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
854 
855  bool IsByte =
856  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
857  if (auto ShiftedImm = getShiftedVal<8>())
858  if (!(IsByte && ShiftedImm->second) &&
859  AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
860  << ShiftedImm->second))
862 
864  }
865 
866  // Unsigned value in the range 0 to 255. For element widths of
867  // 16 bits or higher it may also be a signed multiple of 256 in the
868  // range 0 to 65280.
869  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
870  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
872 
873  bool IsByte =
874  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
875  if (auto ShiftedImm = getShiftedVal<8>())
876  if (!(IsByte && ShiftedImm->second) &&
877  AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
878  << ShiftedImm->second))
880 
882  }
883 
884  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
885  if (isLogicalImm<T>() && !isSVECpyImm<T>())
888  }
889 
890  bool isCondCode() const { return Kind == k_CondCode; }
891 
892  bool isSIMDImmType10() const {
893  if (!isImm())
894  return false;
895  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
896  if (!MCE)
897  return false;
899  }
900 
901  template<int N>
902  bool isBranchTarget() const {
903  if (!isImm())
904  return false;
905  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
906  if (!MCE)
907  return true;
908  int64_t Val = MCE->getValue();
909  if (Val & 0x3)
910  return false;
911  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
912  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
913  }
914 
915  bool
916  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
917  if (!isImm())
918  return false;
919 
920  AArch64MCExpr::VariantKind ELFRefKind;
921  MCSymbolRefExpr::VariantKind DarwinRefKind;
922  int64_t Addend;
923  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
924  DarwinRefKind, Addend)) {
925  return false;
926  }
927  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
928  return false;
929 
930  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
931  if (ELFRefKind == AllowedModifiers[i])
932  return true;
933  }
934 
935  return false;
936  }
937 
938  bool isMovZSymbolG3() const {
939  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
940  }
941 
942  bool isMovZSymbolG2() const {
946  }
947 
948  bool isMovZSymbolG1() const {
949  return isMovWSymbol({
953  });
954  }
955 
956  bool isMovZSymbolG0() const {
960  }
961 
962  bool isMovKSymbolG3() const {
963  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
964  }
965 
966  bool isMovKSymbolG2() const {
967  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
968  }
969 
970  bool isMovKSymbolG1() const {
971  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
974  }
975 
976  bool isMovKSymbolG0() const {
977  return isMovWSymbol(
980  }
981 
982  template<int RegWidth, int Shift>
983  bool isMOVZMovAlias() const {
984  if (!isImm()) return false;
985 
986  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
987  if (!CE) return false;
988  uint64_t Value = CE->getValue();
989 
990  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
991  }
992 
993  template<int RegWidth, int Shift>
994  bool isMOVNMovAlias() const {
995  if (!isImm()) return false;
996 
997  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
998  if (!CE) return false;
999  uint64_t Value = CE->getValue();
1000 
1001  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1002  }
1003 
  // True iff this is a floating-point immediate encodable in the 8-bit
  // AArch64 FP immediate field (getFP64Imm returns -1 when not encodable).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }
1008 
1009  bool isBarrier() const { return Kind == k_Barrier; }
1010  bool isSysReg() const { return Kind == k_SysReg; }
1011 
1012  bool isMRSSystemRegister() const {
1013  if (!isSysReg()) return false;
1014 
1015  return SysReg.MRSReg != -1U;
1016  }
1017 
1018  bool isMSRSystemRegister() const {
1019  if (!isSysReg()) return false;
1020  return SysReg.MSRReg != -1U;
1021  }
1022 
1023  bool isSystemPStateFieldWithImm0_1() const {
1024  if (!isSysReg()) return false;
1025  return (SysReg.PStateField == AArch64PState::PAN ||
1026  SysReg.PStateField == AArch64PState::DIT ||
1027  SysReg.PStateField == AArch64PState::UAO ||
1028  SysReg.PStateField == AArch64PState::SSBS);
1029  }
1030 
1031  bool isSystemPStateFieldWithImm0_15() const {
1032  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1033  return SysReg.PStateField != -1U;
1034  }
1035 
1036  bool isReg() const override {
1037  return Kind == k_Register;
1038  }
1039 
1040  bool isScalarReg() const {
1041  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1042  }
1043 
1044  bool isNeonVectorReg() const {
1045  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1046  }
1047 
1048  bool isNeonVectorRegLo() const {
1049  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1050  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1051  Reg.RegNum);
1052  }
1053 
1054  template <unsigned Class> bool isSVEVectorReg() const {
1055  RegKind RK;
1056  switch (Class) {
1057  case AArch64::ZPRRegClassID:
1058  case AArch64::ZPR_3bRegClassID:
1059  case AArch64::ZPR_4bRegClassID:
1060  RK = RegKind::SVEDataVector;
1061  break;
1062  case AArch64::PPRRegClassID:
1063  case AArch64::PPR_3bRegClassID:
1064  RK = RegKind::SVEPredicateVector;
1065  break;
1066  default:
1067  llvm_unreachable("Unsupport register class");
1068  }
1069 
1070  return (Kind == k_Register && Reg.Kind == RK) &&
1071  AArch64MCRegisterClasses[Class].contains(getReg());
1072  }
1073 
1074  template <unsigned Class> bool isFPRasZPR() const {
1075  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1076  AArch64MCRegisterClasses[Class].contains(getReg());
1077  }
1078 
1079  template <int ElementWidth, unsigned Class>
1080  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1081  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1083 
1084  if (isSVEVectorReg<Class>() &&
1085  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1087 
1089  }
1090 
1091  template <int ElementWidth, unsigned Class>
1092  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1093  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1095 
1096  if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1098 
1100  }
1101 
1102  template <int ElementWidth, unsigned Class,
1103  AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1104  bool ShiftWidthAlwaysSame>
1105  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1106  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1107  if (!VectorMatch.isMatch())
1109 
1110  // Give a more specific diagnostic when the user has explicitly typed in
1111  // a shift-amount that does not match what is expected, but for which
1112  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1113  bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1114  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1115  ShiftExtendTy == AArch64_AM::SXTW) &&
1116  !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1118 
1119  if (MatchShift && ShiftExtendTy == getShiftExtendType())
1121 
1123  }
1124 
1125  bool isGPR32as64() const {
1126  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1127  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1128  }
1129 
1130  bool isGPR64as32() const {
1131  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1132  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1133  }
1134 
1135  bool isWSeqPair() const {
1136  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1137  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1138  Reg.RegNum);
1139  }
1140 
1141  bool isXSeqPair() const {
1142  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1143  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1144  Reg.RegNum);
1145  }
1146 
1147  template<int64_t Angle, int64_t Remainder>
1148  DiagnosticPredicate isComplexRotation() const {
1149  if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1150 
1151  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1152  if (!CE) return DiagnosticPredicateTy::NoMatch;
1153  uint64_t Value = CE->getValue();
1154 
1155  if (Value % Angle == Remainder && Value <= 270)
1158  }
1159 
1160  template <unsigned RegClassID> bool isGPR64() const {
1161  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1162  AArch64MCRegisterClasses[RegClassID].contains(getReg());
1163  }
1164 
1165  template <unsigned RegClassID, int ExtWidth>
1166  DiagnosticPredicate isGPR64WithShiftExtend() const {
1167  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1169 
1170  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1171  getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1174  }
1175 
1176  /// Is this a vector list with the type implicit (presumably attached to the
1177  /// instruction itself)?
1178  template <RegKind VectorKind, unsigned NumRegs>
1179  bool isImplicitlyTypedVectorList() const {
1180  return Kind == k_VectorList && VectorList.Count == NumRegs &&
1181  VectorList.NumElements == 0 &&
1182  VectorList.RegisterKind == VectorKind;
1183  }
1184 
1185  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1186  unsigned ElementWidth>
1187  bool isTypedVectorList() const {
1188  if (Kind != k_VectorList)
1189  return false;
1190  if (VectorList.Count != NumRegs)
1191  return false;
1192  if (VectorList.RegisterKind != VectorKind)
1193  return false;
1194  if (VectorList.ElementWidth != ElementWidth)
1195  return false;
1196  return VectorList.NumElements == NumElements;
1197  }
1198 
1199  template <int Min, int Max>
1200  DiagnosticPredicate isVectorIndex() const {
1201  if (Kind != k_VectorIndex)
1203  if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1206  }
1207 
1208  bool isToken() const override { return Kind == k_Token; }
1209 
1210  bool isTokenEqual(StringRef Str) const {
1211  return Kind == k_Token && getToken() == Str;
1212  }
1213  bool isSysCR() const { return Kind == k_SysCR; }
1214  bool isPrefetch() const { return Kind == k_Prefetch; }
1215  bool isPSBHint() const { return Kind == k_PSBHint; }
1216  bool isBTIHint() const { return Kind == k_BTIHint; }
1217  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1218  bool isShifter() const {
1219  if (!isShiftExtend())
1220  return false;
1221 
1222  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1223  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1224  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1225  ST == AArch64_AM::MSL);
1226  }
1227 
1228  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1229  if (Kind != k_FPImm)
1231 
1232  if (getFPImmIsExact()) {
1233  // Lookup the immediate from table of supported immediates.
1234  auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1235  assert(Desc && "Unknown enum value");
1236 
1237  // Calculate its FP value.
1238  APFloat RealVal(APFloat::IEEEdouble());
1239  if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1240  APFloat::opOK)
1241  llvm_unreachable("FP immediate is not exact");
1242 
1243  if (getFPImm().bitwiseIsEqual(RealVal))
1245  }
1246 
1248  }
1249 
1250  template <unsigned ImmA, unsigned ImmB>
1251  DiagnosticPredicate isExactFPImm() const {
1253  if ((Res = isExactFPImm<ImmA>()))
1255  if ((Res = isExactFPImm<ImmB>()))
1257  return Res;
1258  }
1259 
1260  bool isExtend() const {
1261  if (!isShiftExtend())
1262  return false;
1263 
1264  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1265  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1266  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1267  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1268  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1269  ET == AArch64_AM::LSL) &&
1270  getShiftExtendAmount() <= 4;
1271  }
1272 
1273  bool isExtend64() const {
1274  if (!isExtend())
1275  return false;
1276  // Make sure the extend expects a 32-bit source register.
1277  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1278  return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1279  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1280  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1281  }
1282 
1283  bool isExtendLSL64() const {
1284  if (!isExtend())
1285  return false;
1286  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1287  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1288  ET == AArch64_AM::LSL) &&
1289  getShiftExtendAmount() <= 4;
1290  }
1291 
1292  template<int Width> bool isMemXExtend() const {
1293  if (!isExtend())
1294  return false;
1295  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1296  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1297  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1298  getShiftExtendAmount() == 0);
1299  }
1300 
1301  template<int Width> bool isMemWExtend() const {
1302  if (!isExtend())
1303  return false;
1304  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1305  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1306  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1307  getShiftExtendAmount() == 0);
1308  }
1309 
1310  template <unsigned width>
1311  bool isArithmeticShifter() const {
1312  if (!isShifter())
1313  return false;
1314 
1315  // An arithmetic shifter is LSL, LSR, or ASR.
1316  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1317  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1318  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1319  }
1320 
1321  template <unsigned width>
1322  bool isLogicalShifter() const {
1323  if (!isShifter())
1324  return false;
1325 
1326  // A logical shifter is LSL, LSR, ASR or ROR.
1327  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1328  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1329  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1330  getShiftExtendAmount() < width;
1331  }
1332 
  /// A shift usable by 32-bit MOVZ/MOVN-style immediates.
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16. (The original comment listed
    // the 64-bit amounts 0/16/32/48; it was swapped with isMovImm64Shifter.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1344 
  /// A shift usable by 64-bit MOVZ/MOVN-style immediates.
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. (The original comment
    // listed only 0/16; it was swapped with isMovImm32Shifter.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1356 
1357  bool isLogicalVecShifter() const {
1358  if (!isShifter())
1359  return false;
1360 
1361  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1362  unsigned Shift = getShiftExtendAmount();
1363  return getShiftExtendType() == AArch64_AM::LSL &&
1364  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1365  }
1366 
1367  bool isLogicalVecHalfWordShifter() const {
1368  if (!isLogicalVecShifter())
1369  return false;
1370 
1371  // A logical vector shifter is a left shift by 0 or 8.
1372  unsigned Shift = getShiftExtendAmount();
1373  return getShiftExtendType() == AArch64_AM::LSL &&
1374  (Shift == 0 || Shift == 8);
1375  }
1376 
  /// A shift usable by vector move immediates.
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) of 8 or 16. (The original
    // comment incorrectly described this as a "logical vector shifter".)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1386 
1387  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1388  // to LDUR/STUR when the offset is not legal for the former but is for
1389  // the latter. As such, in addition to checking for being a legal unscaled
1390  // address, also check that it is not a legal scaled address. This avoids
1391  // ambiguity in the matcher.
1392  template<int Width>
1393  bool isSImm9OffsetFB() const {
1394  return isSImm<9>() && !isUImm12Offset<Width / 8>();
1395  }
1396 
1397  bool isAdrpLabel() const {
1398  // Validation was handled during parsing, so we just sanity check that
1399  // something didn't go haywire.
1400  if (!isImm())
1401  return false;
1402 
1403  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1404  int64_t Val = CE->getValue();
1405  int64_t Min = - (4096 * (1LL << (21 - 1)));
1406  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1407  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1408  }
1409 
1410  return true;
1411  }
1412 
1413  bool isAdrLabel() const {
1414  // Validation was handled during parsing, so we just sanity check that
1415  // something didn't go haywire.
1416  if (!isImm())
1417  return false;
1418 
1419  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1420  int64_t Val = CE->getValue();
1421  int64_t Min = - (1LL << (21 - 1));
1422  int64_t Max = ((1LL << (21 - 1)) - 1);
1423  return Val >= Min && Val <= Max;
1424  }
1425 
1426  return true;
1427  }
1428 
1429  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1430  // Add as immediates when possible. Null MCExpr = 0.
1431  if (!Expr)
1433  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1434  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1435  else
1436  Inst.addOperand(MCOperand::createExpr(Expr));
1437  }
1438 
1439  void addRegOperands(MCInst &Inst, unsigned N) const {
1440  assert(N == 1 && "Invalid number of operands!");
1442  }
1443 
1444  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1445  assert(N == 1 && "Invalid number of operands!");
1446  assert(
1447  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1448 
1449  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1450  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1451  RI->getEncodingValue(getReg()));
1452 
1453  Inst.addOperand(MCOperand::createReg(Reg));
1454  }
1455 
1456  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1457  assert(N == 1 && "Invalid number of operands!");
1458  assert(
1459  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1460 
1461  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1462  uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1463  RI->getEncodingValue(getReg()));
1464 
1465  Inst.addOperand(MCOperand::createReg(Reg));
1466  }
1467 
1468  template <int Width>
1469  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1470  unsigned Base;
1471  switch (Width) {
1472  case 8: Base = AArch64::B0; break;
1473  case 16: Base = AArch64::H0; break;
1474  case 32: Base = AArch64::S0; break;
1475  case 64: Base = AArch64::D0; break;
1476  case 128: Base = AArch64::Q0; break;
1477  default:
1478  llvm_unreachable("Unsupported width");
1479  }
1480  Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1481  }
1482 
1483  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1484  assert(N == 1 && "Invalid number of operands!");
1485  assert(
1486  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1487  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1488  }
1489 
1490  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1491  assert(N == 1 && "Invalid number of operands!");
1492  assert(
1493  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1495  }
1496 
1497  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1498  assert(N == 1 && "Invalid number of operands!");
1500  }
1501 
  // Selects which register bank a vector-list operand is encoded in.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  /// Add a vector-list operand (e.g. "{ v0.8b, v1.8b }") as the single MC
  /// register that represents the whole tuple.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // One row per bank. Column 0 holds the register the list start was
    // parsed as (used to rebase below); columns 1-4 hold the first tuple
    // register for lists of 1-4 registers.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    // Rebase: offset of the parsed start register from the bank's first
    // register, applied to the tuple's first register.
    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1530 
1531  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1532  assert(N == 1 && "Invalid number of operands!");
1533  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1534  }
1535 
1536  template <unsigned ImmIs0, unsigned ImmIs1>
1537  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1538  assert(N == 1 && "Invalid number of operands!");
1539  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1540  Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1541  }
1542 
1543  void addImmOperands(MCInst &Inst, unsigned N) const {
1544  assert(N == 1 && "Invalid number of operands!");
1545  // If this is a pageoff symrefexpr with an addend, adjust the addend
1546  // to be only the page-offset portion. Otherwise, just add the expr
1547  // as-is.
1548  addExpr(Inst, getImm());
1549  }
1550 
1551  template <int Shift>
1552  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1553  assert(N == 2 && "Invalid number of operands!");
1554  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1555  Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1556  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1557  } else if (isShiftedImm()) {
1558  addExpr(Inst, getShiftedImmVal());
1559  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1560  } else {
1561  addExpr(Inst, getImm());
1563  }
1564  }
1565 
1566  template <int Shift>
1567  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1568  assert(N == 2 && "Invalid number of operands!");
1569  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1570  Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1571  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1572  } else
1573  llvm_unreachable("Not a shifted negative immediate");
1574  }
1575 
1576  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1577  assert(N == 1 && "Invalid number of operands!");
1579  }
1580 
1581  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1582  assert(N == 1 && "Invalid number of operands!");
1583  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1584  if (!MCE)
1585  addExpr(Inst, getImm());
1586  else
1587  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1588  }
1589 
1590  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1591  addImmOperands(Inst, N);
1592  }
1593 
1594  template<int Scale>
1595  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1596  assert(N == 1 && "Invalid number of operands!");
1597  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1598 
1599  if (!MCE) {
1600  Inst.addOperand(MCOperand::createExpr(getImm()));
1601  return;
1602  }
1603  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1604  }
1605 
1606  void addUImm6Operands(MCInst &Inst, unsigned N) const {
1607  assert(N == 1 && "Invalid number of operands!");
1608  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1610  }
1611 
1612  template <int Scale>
1613  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1614  assert(N == 1 && "Invalid number of operands!");
1615  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1616  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1617  }
1618 
1619  template <typename T>
1620  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1621  assert(N == 1 && "Invalid number of operands!");
1622  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1623  typename std::make_unsigned<T>::type Val = MCE->getValue();
1624  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1625  Inst.addOperand(MCOperand::createImm(encoding));
1626  }
1627 
1628  template <typename T>
1629  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1630  assert(N == 1 && "Invalid number of operands!");
1631  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1632  typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1633  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1634  Inst.addOperand(MCOperand::createImm(encoding));
1635  }
1636 
1637  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1638  assert(N == 1 && "Invalid number of operands!");
1639  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1640  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1641  Inst.addOperand(MCOperand::createImm(encoding));
1642  }
1643 
1644  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1645  // Branch operands don't encode the low bits, so shift them off
1646  // here. If it's a label, however, just put it on directly as there's
1647  // not enough information now to do anything.
1648  assert(N == 1 && "Invalid number of operands!");
1649  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1650  if (!MCE) {
1651  addExpr(Inst, getImm());
1652  return;
1653  }
1654  assert(MCE && "Invalid constant immediate operand!");
1655  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1656  }
1657 
1658  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1659  // Branch operands don't encode the low bits, so shift them off
1660  // here. If it's a label, however, just put it on directly as there's
1661  // not enough information now to do anything.
1662  assert(N == 1 && "Invalid number of operands!");
1663  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1664  if (!MCE) {
1665  addExpr(Inst, getImm());
1666  return;
1667  }
1668  assert(MCE && "Invalid constant immediate operand!");
1669  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1670  }
1671 
1672  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1673  // Branch operands don't encode the low bits, so shift them off
1674  // here. If it's a label, however, just put it on directly as there's
1675  // not enough information now to do anything.
1676  assert(N == 1 && "Invalid number of operands!");
1677  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1678  if (!MCE) {
1679  addExpr(Inst, getImm());
1680  return;
1681  }
1682  assert(MCE && "Invalid constant immediate operand!");
1683  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1684  }
1685 
1686  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1687  assert(N == 1 && "Invalid number of operands!");
1689  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1690  }
1691 
1692  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1693  assert(N == 1 && "Invalid number of operands!");
1694  Inst.addOperand(MCOperand::createImm(getBarrier()));
1695  }
1696 
1697  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1698  assert(N == 1 && "Invalid number of operands!");
1699 
1700  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1701  }
1702 
  /// Add the MSR encoding of this system register.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  /// Add a PSTATE field that takes a 1-bit immediate.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  /// Add a PSTATE field that takes a 4-bit immediate.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  /// Add a system control register (CRn/CRm) field.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  /// Add a prefetch-operation immediate (PRFM).
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  /// Add a PSB hint immediate.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }

  /// Add a BTI hint immediate.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1740 
1741  void addShifterOperands(MCInst &Inst, unsigned N) const {
1742  assert(N == 1 && "Invalid number of operands!");
1743  unsigned Imm =
1744  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1745  Inst.addOperand(MCOperand::createImm(Imm));
1746  }
1747 
1748  void addExtendOperands(MCInst &Inst, unsigned N) const {
1749  assert(N == 1 && "Invalid number of operands!");
1750  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1751  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1752  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1753  Inst.addOperand(MCOperand::createImm(Imm));
1754  }
1755 
1756  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1757  assert(N == 1 && "Invalid number of operands!");
1758  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1759  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1760  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1761  Inst.addOperand(MCOperand::createImm(Imm));
1762  }
1763 
1764  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1765  assert(N == 2 && "Invalid number of operands!");
1766  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1767  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1768  Inst.addOperand(MCOperand::createImm(IsSigned));
1769  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1770  }
1771 
1772  // For 8-bit load/store instructions with a register offset, both the
1773  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1774  // they're disambiguated by whether the shift was explicit or implicit rather
1775  // than its size.
1776  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1777  assert(N == 2 && "Invalid number of operands!");
1778  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1779  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1780  Inst.addOperand(MCOperand::createImm(IsSigned));
1781  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1782  }
1783 
1784  template<int Shift>
1785  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1786  assert(N == 1 && "Invalid number of operands!");
1787 
1788  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1789  uint64_t Value = CE->getValue();
1790  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1791  }
1792 
1793  template<int Shift>
1794  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1795  assert(N == 1 && "Invalid number of operands!");
1796 
1797  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1798  uint64_t Value = CE->getValue();
1799  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1800  }
1801 
1802  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1803  assert(N == 1 && "Invalid number of operands!");
1804  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1805  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1806  }
1807 
1808  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1809  assert(N == 1 && "Invalid number of operands!");
1810  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1811  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1812  }
1813 
1814  void print(raw_ostream &OS) const override;
1815 
1816  static std::unique_ptr<AArch64Operand>
1817  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1818  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1819  Op->Tok.Data = Str.data();
1820  Op->Tok.Length = Str.size();
1821  Op->Tok.IsSuffix = IsSuffix;
1822  Op->StartLoc = S;
1823  Op->EndLoc = S;
1824  return Op;
1825  }
1826 
1827  static std::unique_ptr<AArch64Operand>
1828  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1829  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1831  unsigned ShiftAmount = 0,
1832  unsigned HasExplicitAmount = false) {
1833  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1834  Op->Reg.RegNum = RegNum;
1835  Op->Reg.Kind = Kind;
1836  Op->Reg.ElementWidth = 0;
1837  Op->Reg.EqualityTy = EqTy;
1838  Op->Reg.ShiftExtend.Type = ExtTy;
1839  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1840  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1841  Op->StartLoc = S;
1842  Op->EndLoc = E;
1843  return Op;
1844  }
1845 
1846  static std::unique_ptr<AArch64Operand>
1847  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1848  SMLoc S, SMLoc E, MCContext &Ctx,
1850  unsigned ShiftAmount = 0,
1851  unsigned HasExplicitAmount = false) {
1852  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1853  Kind == RegKind::SVEPredicateVector) &&
1854  "Invalid vector kind");
1855  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1856  HasExplicitAmount);
1857  Op->Reg.ElementWidth = ElementWidth;
1858  return Op;
1859  }
1860 
1861  static std::unique_ptr<AArch64Operand>
1862  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1863  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1864  MCContext &Ctx) {
1865  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1866  Op->VectorList.RegNum = RegNum;
1867  Op->VectorList.Count = Count;
1868  Op->VectorList.NumElements = NumElements;
1869  Op->VectorList.ElementWidth = ElementWidth;
1870  Op->VectorList.RegisterKind = RegisterKind;
1871  Op->StartLoc = S;
1872  Op->EndLoc = E;
1873  return Op;
1874  }
1875 
1876  static std::unique_ptr<AArch64Operand>
1877  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1878  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1879  Op->VectorIndex.Val = Idx;
1880  Op->StartLoc = S;
1881  Op->EndLoc = E;
1882  return Op;
1883  }
1884 
1885  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1886  SMLoc E, MCContext &Ctx) {
1887  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1888  Op->Imm.Val = Val;
1889  Op->StartLoc = S;
1890  Op->EndLoc = E;
1891  return Op;
1892  }
1893 
1894  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1895  unsigned ShiftAmount,
1896  SMLoc S, SMLoc E,
1897  MCContext &Ctx) {
1898  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1899  Op->ShiftedImm .Val = Val;
1900  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1901  Op->StartLoc = S;
1902  Op->EndLoc = E;
1903  return Op;
1904  }
1905 
1906  static std::unique_ptr<AArch64Operand>
1907  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1908  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1909  Op->CondCode.Code = Code;
1910  Op->StartLoc = S;
1911  Op->EndLoc = E;
1912  return Op;
1913  }
1914 
1915  static std::unique_ptr<AArch64Operand>
1916  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1917  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1918  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1919  Op->FPImm.IsExact = IsExact;
1920  Op->StartLoc = S;
1921  Op->EndLoc = S;
1922  return Op;
1923  }
1924 
1925  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1926  StringRef Str,
1927  SMLoc S,
1928  MCContext &Ctx) {
1929  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1930  Op->Barrier.Val = Val;
1931  Op->Barrier.Data = Str.data();
1932  Op->Barrier.Length = Str.size();
1933  Op->StartLoc = S;
1934  Op->EndLoc = S;
1935  return Op;
1936  }
1937 
1938  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1939  uint32_t MRSReg,
1940  uint32_t MSRReg,
1941  uint32_t PStateField,
1942  MCContext &Ctx) {
1943  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1944  Op->SysReg.Data = Str.data();
1945  Op->SysReg.Length = Str.size();
1946  Op->SysReg.MRSReg = MRSReg;
1947  Op->SysReg.MSRReg = MSRReg;
1948  Op->SysReg.PStateField = PStateField;
1949  Op->StartLoc = S;
1950  Op->EndLoc = S;
1951  return Op;
1952  }
1953 
1954  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1955  SMLoc E, MCContext &Ctx) {
1956  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1957  Op->SysCRImm.Val = Val;
1958  Op->StartLoc = S;
1959  Op->EndLoc = E;
1960  return Op;
1961  }
1962 
1963  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1964  StringRef Str,
1965  SMLoc S,
1966  MCContext &Ctx) {
1967  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1968  Op->Prefetch.Val = Val;
1969  Op->Barrier.Data = Str.data();
1970  Op->Barrier.Length = Str.size();
1971  Op->StartLoc = S;
1972  Op->EndLoc = S;
1973  return Op;
1974  }
1975 
1976  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1977  StringRef Str,
1978  SMLoc S,
1979  MCContext &Ctx) {
1980  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1981  Op->PSBHint.Val = Val;
1982  Op->PSBHint.Data = Str.data();
1983  Op->PSBHint.Length = Str.size();
1984  Op->StartLoc = S;
1985  Op->EndLoc = S;
1986  return Op;
1987  }
1988 
1989  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1990  StringRef Str,
1991  SMLoc S,
1992  MCContext &Ctx) {
1993  auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1994  Op->BTIHint.Val = Val << 1 | 32;
1995  Op->BTIHint.Data = Str.data();
1996  Op->BTIHint.Length = Str.size();
1997  Op->StartLoc = S;
1998  Op->EndLoc = S;
1999  return Op;
2000  }
2001 
2002  static std::unique_ptr<AArch64Operand>
2003  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2004  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2005  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2006  Op->ShiftExtend.Type = ShOp;
2007  Op->ShiftExtend.Amount = Val;
2008  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2009  Op->StartLoc = S;
2010  Op->EndLoc = E;
2011  return Op;
2012  }
2013 };
2014 
2015 } // end anonymous namespace.
2016 
2017 void AArch64Operand::print(raw_ostream &OS) const {
2018  switch (Kind) {
2019  case k_FPImm:
2020  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2021  if (!getFPImmIsExact())
2022  OS << " (inexact)";
2023  OS << ">";
2024  break;
2025  case k_Barrier: {
2026  StringRef Name = getBarrierName();
2027  if (!Name.empty())
2028  OS << "<barrier " << Name << ">";
2029  else
2030  OS << "<barrier invalid #" << getBarrier() << ">";
2031  break;
2032  }
2033  case k_Immediate:
2034  OS << *getImm();
2035  break;
2036  case k_ShiftedImm: {
2037  unsigned Shift = getShiftedImmShift();
2038  OS << "<shiftedimm ";
2039  OS << *getShiftedImmVal();
2040  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2041  break;
2042  }
2043  case k_CondCode:
2044  OS << "<condcode " << getCondCode() << ">";
2045  break;
2046  case k_VectorList: {
2047  OS << "<vectorlist ";
2048  unsigned Reg = getVectorListStart();
2049  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2050  OS << Reg + i << " ";
2051  OS << ">";
2052  break;
2053  }
2054  case k_VectorIndex:
2055  OS << "<vectorindex " << getVectorIndex() << ">";
2056  break;
2057  case k_SysReg:
2058  OS << "<sysreg: " << getSysReg() << '>';
2059  break;
2060  case k_Token:
2061  OS << "'" << getToken() << "'";
2062  break;
2063  case k_SysCR:
2064  OS << "c" << getSysCR();
2065  break;
2066  case k_Prefetch: {
2067  StringRef Name = getPrefetchName();
2068  if (!Name.empty())
2069  OS << "<prfop " << Name << ">";
2070  else
2071  OS << "<prfop invalid #" << getPrefetch() << ">";
2072  break;
2073  }
2074  case k_PSBHint:
2075  OS << getPSBHintName();
2076  break;
2077  case k_Register:
2078  OS << "<register " << getReg() << ">";
2079  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2080  break;
2082  case k_BTIHint:
2083  OS << getBTIHintName();
2084  break;
2085  case k_ShiftExtend:
2086  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2087  << getShiftExtendAmount();
2088  if (!hasShiftExtendAmount())
2089  OS << "<imp>";
2090  OS << '>';
2091  break;
2092  }
2093 }
2094 
2095 /// @name Auto-generated Match Functions
2096 /// {
2097 
2098 static unsigned MatchRegisterName(StringRef Name);
2099 
2100 /// }
2101 
2102 static unsigned MatchNeonVectorRegName(StringRef Name) {
2103  return StringSwitch<unsigned>(Name.lower())
2104  .Case("v0", AArch64::Q0)
2105  .Case("v1", AArch64::Q1)
2106  .Case("v2", AArch64::Q2)
2107  .Case("v3", AArch64::Q3)
2108  .Case("v4", AArch64::Q4)
2109  .Case("v5", AArch64::Q5)
2110  .Case("v6", AArch64::Q6)
2111  .Case("v7", AArch64::Q7)
2112  .Case("v8", AArch64::Q8)
2113  .Case("v9", AArch64::Q9)
2114  .Case("v10", AArch64::Q10)
2115  .Case("v11", AArch64::Q11)
2116  .Case("v12", AArch64::Q12)
2117  .Case("v13", AArch64::Q13)
2118  .Case("v14", AArch64::Q14)
2119  .Case("v15", AArch64::Q15)
2120  .Case("v16", AArch64::Q16)
2121  .Case("v17", AArch64::Q17)
2122  .Case("v18", AArch64::Q18)
2123  .Case("v19", AArch64::Q19)
2124  .Case("v20", AArch64::Q20)
2125  .Case("v21", AArch64::Q21)
2126  .Case("v22", AArch64::Q22)
2127  .Case("v23", AArch64::Q23)
2128  .Case("v24", AArch64::Q24)
2129  .Case("v25", AArch64::Q25)
2130  .Case("v26", AArch64::Q26)
2131  .Case("v27", AArch64::Q27)
2132  .Case("v28", AArch64::Q28)
2133  .Case("v29", AArch64::Q29)
2134  .Case("v30", AArch64::Q30)
2135  .Case("v31", AArch64::Q31)
2136  .Default(0);
2137 }
2138 
2139 /// Returns an optional pair of (#elements, element-width) if Suffix
2140 /// is a valid vector kind. Where the number of elements in a vector
2141 /// or the vector width is implicit or explicitly unknown (but still a
2142 /// valid suffix kind), 0 is used.
2143 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2144  RegKind VectorKind) {
2145  std::pair<int, int> Res = {-1, -1};
2146 
2147  switch (VectorKind) {
2148  case RegKind::NeonVector:
2149  Res =
2150  StringSwitch<std::pair<int, int>>(Suffix.lower())
2151  .Case("", {0, 0})
2152  .Case(".1d", {1, 64})
2153  .Case(".1q", {1, 128})
2154  // '.2h' needed for fp16 scalar pairwise reductions
2155  .Case(".2h", {2, 16})
2156  .Case(".2s", {2, 32})
2157  .Case(".2d", {2, 64})
2158  // '.4b' is another special case for the ARMv8.2a dot product
2159  // operand
2160  .Case(".4b", {4, 8})
2161  .Case(".4h", {4, 16})
2162  .Case(".4s", {4, 32})
2163  .Case(".8b", {8, 8})
2164  .Case(".8h", {8, 16})
2165  .Case(".16b", {16, 8})
2166  // Accept the width neutral ones, too, for verbose syntax. If those
2167  // aren't used in the right places, the token operand won't match so
2168  // all will work out.
2169  .Case(".b", {0, 8})
2170  .Case(".h", {0, 16})
2171  .Case(".s", {0, 32})
2172  .Case(".d", {0, 64})
2173  .Default({-1, -1});
2174  break;
2175  case RegKind::SVEPredicateVector:
2176  case RegKind::SVEDataVector:
2177  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2178  .Case("", {0, 0})
2179  .Case(".b", {0, 8})
2180  .Case(".h", {0, 16})
2181  .Case(".s", {0, 32})
2182  .Case(".d", {0, 64})
2183  .Case(".q", {0, 128})
2184  .Default({-1, -1});
2185  break;
2186  default:
2187  llvm_unreachable("Unsupported RegKind");
2188  }
2189 
2190  if (Res == std::make_pair(-1, -1))
2191  return Optional<std::pair<int, int>>();
2192 
2193  return Optional<std::pair<int, int>>(Res);
2194 }
2195 
2196 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2197  return parseVectorKind(Suffix, VectorKind).hasValue();
2198 }
2199 
2200 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2201  return StringSwitch<unsigned>(Name.lower())
2202  .Case("z0", AArch64::Z0)
2203  .Case("z1", AArch64::Z1)
2204  .Case("z2", AArch64::Z2)
2205  .Case("z3", AArch64::Z3)
2206  .Case("z4", AArch64::Z4)
2207  .Case("z5", AArch64::Z5)
2208  .Case("z6", AArch64::Z6)
2209  .Case("z7", AArch64::Z7)
2210  .Case("z8", AArch64::Z8)
2211  .Case("z9", AArch64::Z9)
2212  .Case("z10", AArch64::Z10)
2213  .Case("z11", AArch64::Z11)
2214  .Case("z12", AArch64::Z12)
2215  .Case("z13", AArch64::Z13)
2216  .Case("z14", AArch64::Z14)
2217  .Case("z15", AArch64::Z15)
2218  .Case("z16", AArch64::Z16)
2219  .Case("z17", AArch64::Z17)
2220  .Case("z18", AArch64::Z18)
2221  .Case("z19", AArch64::Z19)
2222  .Case("z20", AArch64::Z20)
2223  .Case("z21", AArch64::Z21)
2224  .Case("z22", AArch64::Z22)
2225  .Case("z23", AArch64::Z23)
2226  .Case("z24", AArch64::Z24)
2227  .Case("z25", AArch64::Z25)
2228  .Case("z26", AArch64::Z26)
2229  .Case("z27", AArch64::Z27)
2230  .Case("z28", AArch64::Z28)
2231  .Case("z29", AArch64::Z29)
2232  .Case("z30", AArch64::Z30)
2233  .Case("z31", AArch64::Z31)
2234  .Default(0);
2235 }
2236 
2237 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2238  return StringSwitch<unsigned>(Name.lower())
2239  .Case("p0", AArch64::P0)
2240  .Case("p1", AArch64::P1)
2241  .Case("p2", AArch64::P2)
2242  .Case("p3", AArch64::P3)
2243  .Case("p4", AArch64::P4)
2244  .Case("p5", AArch64::P5)
2245  .Case("p6", AArch64::P6)
2246  .Case("p7", AArch64::P7)
2247  .Case("p8", AArch64::P8)
2248  .Case("p9", AArch64::P9)
2249  .Case("p10", AArch64::P10)
2250  .Case("p11", AArch64::P11)
2251  .Case("p12", AArch64::P12)
2252  .Case("p13", AArch64::P13)
2253  .Case("p14", AArch64::P14)
2254  .Case("p15", AArch64::P15)
2255  .Default(0);
2256 }
2257 
2258 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2259  SMLoc &EndLoc) {
2260  StartLoc = getLoc();
2261  auto Res = tryParseScalarRegister(RegNo);
2262  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2263  return Res != MatchOperand_Success;
2264 }
2265 
2266 // Matches a register name or register alias previously defined by '.req'
2267 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2268  RegKind Kind) {
2269  unsigned RegNum = 0;
2270  if ((RegNum = matchSVEDataVectorRegName(Name)))
2271  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2272 
2273  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2274  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2275 
2276  if ((RegNum = MatchNeonVectorRegName(Name)))
2277  return Kind == RegKind::NeonVector ? RegNum : 0;
2278 
2279  // The parsed register must be of RegKind Scalar
2280  if ((RegNum = MatchRegisterName(Name)))
2281  return Kind == RegKind::Scalar ? RegNum : 0;
2282 
2283  if (!RegNum) {
2284  // Handle a few common aliases of registers.
2285  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2286  .Case("fp", AArch64::FP)
2287  .Case("lr", AArch64::LR)
2288  .Case("x31", AArch64::XZR)
2289  .Case("w31", AArch64::WZR)
2290  .Default(0))
2291  return Kind == RegKind::Scalar ? RegNum : 0;
2292 
2293  // Check for aliases registered via .req. Canonicalize to lower case.
2294  // That's more consistent since register names are case insensitive, and
2295  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2296  auto Entry = RegisterReqs.find(Name.lower());
2297  if (Entry == RegisterReqs.end())
2298  return 0;
2299 
2300  // set RegNum if the match is the right kind of register
2301  if (Kind == Entry->getValue().first)
2302  RegNum = Entry->getValue().second;
2303  }
2304  return RegNum;
2305 }
2306 
2307 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2308 /// Identifier when called, and if it is a register name the token is eaten and
2309 /// the register is added to the operand list.
2311 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2312  MCAsmParser &Parser = getParser();
2313  const AsmToken &Tok = Parser.getTok();
2314  if (Tok.isNot(AsmToken::Identifier))
2315  return MatchOperand_NoMatch;
2316 
2317  std::string lowerCase = Tok.getString().lower();
2318  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2319  if (Reg == 0)
2320  return MatchOperand_NoMatch;
2321 
2322  RegNum = Reg;
2323  Parser.Lex(); // Eat identifier token.
2324  return MatchOperand_Success;
2325 }
2326 
2327 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2329 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2330  MCAsmParser &Parser = getParser();
2331  SMLoc S = getLoc();
2332 
2333  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2334  Error(S, "Expected cN operand where 0 <= N <= 15");
2335  return MatchOperand_ParseFail;
2336  }
2337 
2338  StringRef Tok = Parser.getTok().getIdentifier();
2339  if (Tok[0] != 'c' && Tok[0] != 'C') {
2340  Error(S, "Expected cN operand where 0 <= N <= 15");
2341  return MatchOperand_ParseFail;
2342  }
2343 
2344  uint32_t CRNum;
2345  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2346  if (BadNum || CRNum > 15) {
2347  Error(S, "Expected cN operand where 0 <= N <= 15");
2348  return MatchOperand_ParseFail;
2349  }
2350 
2351  Parser.Lex(); // Eat identifier token.
2352  Operands.push_back(
2353  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2354  return MatchOperand_Success;
2355 }
2356 
2357 /// tryParsePrefetch - Try to parse a prefetch operand.
2358 template <bool IsSVEPrefetch>
2360 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2361  MCAsmParser &Parser = getParser();
2362  SMLoc S = getLoc();
2363  const AsmToken &Tok = Parser.getTok();
2364 
2365  auto LookupByName = [](StringRef N) {
2366  if (IsSVEPrefetch) {
2367  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2368  return Optional<unsigned>(Res->Encoding);
2369  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2370  return Optional<unsigned>(Res->Encoding);
2371  return Optional<unsigned>();
2372  };
2373 
2374  auto LookupByEncoding = [](unsigned E) {
2375  if (IsSVEPrefetch) {
2376  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2377  return Optional<StringRef>(Res->Name);
2378  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2379  return Optional<StringRef>(Res->Name);
2380  return Optional<StringRef>();
2381  };
2382  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2383 
2384  // Either an identifier for named values or a 5-bit immediate.
2385  // Eat optional hash.
2386  if (parseOptionalToken(AsmToken::Hash) ||
2387  Tok.is(AsmToken::Integer)) {
2388  const MCExpr *ImmVal;
2389  if (getParser().parseExpression(ImmVal))
2390  return MatchOperand_ParseFail;
2391 
2392  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2393  if (!MCE) {
2394  TokError("immediate value expected for prefetch operand");
2395  return MatchOperand_ParseFail;
2396  }
2397  unsigned prfop = MCE->getValue();
2398  if (prfop > MaxVal) {
2399  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2400  "] expected");
2401  return MatchOperand_ParseFail;
2402  }
2403 
2404  auto PRFM = LookupByEncoding(MCE->getValue());
2405  Operands.push_back(AArch64Operand::CreatePrefetch(
2406  prfop, PRFM.getValueOr(""), S, getContext()));
2407  return MatchOperand_Success;
2408  }
2409 
2410  if (Tok.isNot(AsmToken::Identifier)) {
2411  TokError("prefetch hint expected");
2412  return MatchOperand_ParseFail;
2413  }
2414 
2415  auto PRFM = LookupByName(Tok.getString());
2416  if (!PRFM) {
2417  TokError("prefetch hint expected");
2418  return MatchOperand_ParseFail;
2419  }
2420 
2421  Parser.Lex(); // Eat identifier token.
2422  Operands.push_back(AArch64Operand::CreatePrefetch(
2423  *PRFM, Tok.getString(), S, getContext()));
2424  return MatchOperand_Success;
2425 }
2426 
2427 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2429 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2430  MCAsmParser &Parser = getParser();
2431  SMLoc S = getLoc();
2432  const AsmToken &Tok = Parser.getTok();
2433  if (Tok.isNot(AsmToken::Identifier)) {
2434  TokError("invalid operand for instruction");
2435  return MatchOperand_ParseFail;
2436  }
2437 
2438  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2439  if (!PSB) {
2440  TokError("invalid operand for instruction");
2441  return MatchOperand_ParseFail;
2442  }
2443 
2444  Parser.Lex(); // Eat identifier token.
2445  Operands.push_back(AArch64Operand::CreatePSBHint(
2446  PSB->Encoding, Tok.getString(), S, getContext()));
2447  return MatchOperand_Success;
2448 }
2449 
2450 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2452 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2453  MCAsmParser &Parser = getParser();
2454  SMLoc S = getLoc();
2455  const AsmToken &Tok = Parser.getTok();
2456  if (Tok.isNot(AsmToken::Identifier)) {
2457  TokError("invalid operand for instruction");
2458  return MatchOperand_ParseFail;
2459  }
2460 
2461  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2462  if (!BTI) {
2463  TokError("invalid operand for instruction");
2464  return MatchOperand_ParseFail;
2465  }
2466 
2467  Parser.Lex(); // Eat identifier token.
2468  Operands.push_back(AArch64Operand::CreateBTIHint(
2469  BTI->Encoding, Tok.getString(), S, getContext()));
2470  return MatchOperand_Success;
2471 }
2472 
2473 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2474 /// instruction.
2476 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2477  MCAsmParser &Parser = getParser();
2478  SMLoc S = getLoc();
2479  const MCExpr *Expr = nullptr;
2480 
2481  if (Parser.getTok().is(AsmToken::Hash)) {
2482  Parser.Lex(); // Eat hash token.
2483  }
2484 
2485  if (parseSymbolicImmVal(Expr))
2486  return MatchOperand_ParseFail;
2487 
2488  AArch64MCExpr::VariantKind ELFRefKind;
2489  MCSymbolRefExpr::VariantKind DarwinRefKind;
2490  int64_t Addend;
2491  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2492  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2493  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2494  // No modifier was specified at all; this is the syntax for an ELF basic
2495  // ADRP relocation (unfortunately).
2496  Expr =
2498  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2499  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2500  Addend != 0) {
2501  Error(S, "gotpage label reference not allowed an addend");
2502  return MatchOperand_ParseFail;
2503  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2504  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2505  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2506  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2507  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2508  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2509  // The operand must be an @page or @gotpage qualified symbolref.
2510  Error(S, "page or gotpage label reference expected");
2511  return MatchOperand_ParseFail;
2512  }
2513  }
2514 
2515  // We have either a label reference possibly with addend or an immediate. The
2516  // addend is a raw value here. The linker will adjust it to only reference the
2517  // page.
2518  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2519  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2520 
2521  return MatchOperand_Success;
2522 }
2523 
2524 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2525 /// instruction.
2527 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2528  SMLoc S = getLoc();
2529  const MCExpr *Expr = nullptr;
2530 
2531  // Leave anything with a bracket to the default for SVE
2532  if (getParser().getTok().is(AsmToken::LBrac))
2533  return MatchOperand_NoMatch;
2534 
2535  if (getParser().getTok().is(AsmToken::Hash))
2536  getParser().Lex(); // Eat hash token.
2537 
2538  if (parseSymbolicImmVal(Expr))
2539  return MatchOperand_ParseFail;
2540 
2541  AArch64MCExpr::VariantKind ELFRefKind;
2542  MCSymbolRefExpr::VariantKind DarwinRefKind;
2543  int64_t Addend;
2544  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2545  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2546  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2547  // No modifier was specified at all; this is the syntax for an ELF basic
2548  // ADR relocation (unfortunately).
2549  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2550  } else {
2551  Error(S, "unexpected adr label");
2552  return MatchOperand_ParseFail;
2553  }
2554  }
2555 
2556  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2557  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2558  return MatchOperand_Success;
2559 }
2560 
2561 /// tryParseFPImm - A floating point immediate expression operand.
2562 template<bool AddFPZeroAsLiteral>
2564 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2565  MCAsmParser &Parser = getParser();
2566  SMLoc S = getLoc();
2567 
2568  bool Hash = parseOptionalToken(AsmToken::Hash);
2569 
2570  // Handle negation, as that still comes through as a separate token.
2571  bool isNegative = parseOptionalToken(AsmToken::Minus);
2572 
2573  const AsmToken &Tok = Parser.getTok();
2574  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2575  if (!Hash)
2576  return MatchOperand_NoMatch;
2577  TokError("invalid floating point immediate");
2578  return MatchOperand_ParseFail;
2579  }
2580 
2581  // Parse hexadecimal representation.
2582  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2583  if (Tok.getIntVal() > 255 || isNegative) {
2584  TokError("encoded floating point value out of range");
2585  return MatchOperand_ParseFail;
2586  }
2587 
2588  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2589  Operands.push_back(
2590  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2591  } else {
2592  // Parse FP representation.
2593  APFloat RealVal(APFloat::IEEEdouble());
2594  auto Status =
2596  if (isNegative)
2597  RealVal.changeSign();
2598 
2599  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2600  Operands.push_back(
2601  AArch64Operand::CreateToken("#0", false, S, getContext()));
2602  Operands.push_back(
2603  AArch64Operand::CreateToken(".0", false, S, getContext()));
2604  } else
2605  Operands.push_back(AArch64Operand::CreateFPImm(
2606  RealVal, Status == APFloat::opOK, S, getContext()));
2607  }
2608 
2609  Parser.Lex(); // Eat the token.
2610 
2611  return MatchOperand_Success;
2612 }
2613 
2614 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2615 /// a shift suffix, for example '#1, lsl #12'.
2617 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2618  MCAsmParser &Parser = getParser();
2619  SMLoc S = getLoc();
2620 
2621  if (Parser.getTok().is(AsmToken::Hash))
2622  Parser.Lex(); // Eat '#'
2623  else if (Parser.getTok().isNot(AsmToken::Integer))
2624  // Operand should start from # or should be integer, emit error otherwise.
2625  return MatchOperand_NoMatch;
2626 
2627  const MCExpr *Imm = nullptr;
2628  if (parseSymbolicImmVal(Imm))
2629  return MatchOperand_ParseFail;
2630  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2631  SMLoc E = Parser.getTok().getLoc();
2632  Operands.push_back(
2633  AArch64Operand::CreateImm(Imm, S, E, getContext()));
2634  return MatchOperand_Success;
2635  }
2636 
2637  // Eat ','
2638  Parser.Lex();
2639 
2640  // The optional operand must be "lsl #N" where N is non-negative.
2641  if (!Parser.getTok().is(AsmToken::Identifier) ||
2642  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2643  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2644  return MatchOperand_ParseFail;
2645  }
2646 
2647  // Eat 'lsl'
2648  Parser.Lex();
2649 
2650  parseOptionalToken(AsmToken::Hash);
2651 
2652  if (Parser.getTok().isNot(AsmToken::Integer)) {
2653  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2654  return MatchOperand_ParseFail;
2655  }
2656 
2657  int64_t ShiftAmount = Parser.getTok().getIntVal();
2658 
2659  if (ShiftAmount < 0) {
2660  Error(Parser.getTok().getLoc(), "positive shift amount required");
2661  return MatchOperand_ParseFail;
2662  }
2663  Parser.Lex(); // Eat the number
2664 
2665  // Just in case the optional lsl #0 is used for immediates other than zero.
2666  if (ShiftAmount == 0 && Imm != nullptr) {
2667  SMLoc E = Parser.getTok().getLoc();
2668  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2669  return MatchOperand_Success;
2670  }
2671 
2672  SMLoc E = Parser.getTok().getLoc();
2673  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2674  S, E, getContext()));
2675  return MatchOperand_Success;
2676 }
2677 
2678 /// parseCondCodeString - Parse a Condition Code string.
2679 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2681  .Case("eq", AArch64CC::EQ)
2682  .Case("ne", AArch64CC::NE)
2683  .Case("cs", AArch64CC::HS)
2684  .Case("hs", AArch64CC::HS)
2685  .Case("cc", AArch64CC::LO)
2686  .Case("lo", AArch64CC::LO)
2687  .Case("mi", AArch64CC::MI)
2688  .Case("pl", AArch64CC::PL)
2689  .Case("vs", AArch64CC::VS)
2690  .Case("vc", AArch64CC::VC)
2691  .Case("hi", AArch64CC::HI)
2692  .Case("ls", AArch64CC::LS)
2693  .Case("ge", AArch64CC::GE)
2694  .Case("lt", AArch64CC::LT)
2695  .Case("gt", AArch64CC::GT)
2696  .Case("le", AArch64CC::LE)
2697  .Case("al", AArch64CC::AL)
2698  .Case("nv", AArch64CC::NV)
2700 
2701  if (CC == AArch64CC::Invalid &&
2702  getSTI().getFeatureBits()[AArch64::FeatureSVE])
2704  .Case("none", AArch64CC::EQ)
2705  .Case("any", AArch64CC::NE)
2706  .Case("nlast", AArch64CC::HS)
2707  .Case("last", AArch64CC::LO)
2708  .Case("first", AArch64CC::MI)
2709  .Case("nfrst", AArch64CC::PL)
2710  .Case("pmore", AArch64CC::HI)
2711  .Case("plast", AArch64CC::LS)
2712  .Case("tcont", AArch64CC::GE)
2713  .Case("tstop", AArch64CC::LT)
2715 
2716  return CC;
2717 }
2718 
2719 /// parseCondCode - Parse a Condition Code operand.
2720 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2721  bool invertCondCode) {
2722  MCAsmParser &Parser = getParser();
2723  SMLoc S = getLoc();
2724  const AsmToken &Tok = Parser.getTok();
2725  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2726 
2727  StringRef Cond = Tok.getString();
2728  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2729  if (CC == AArch64CC::Invalid)
2730  return TokError("invalid condition code");
2731  Parser.Lex(); // Eat identifier token.
2732 
2733  if (invertCondCode) {
2734  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2735  return TokError("condition codes AL and NV are invalid for this instruction");
2737  }
2738 
2739  Operands.push_back(
2740  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2741  return false;
2742 }
2743 
2744 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2745 /// them if present.
2747 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2748  MCAsmParser &Parser = getParser();
2749  const AsmToken &Tok = Parser.getTok();
2750  std::string LowerID = Tok.getString().lower();
2753  .Case("lsl", AArch64_AM::LSL)
2754  .Case("lsr", AArch64_AM::LSR)
2755  .Case("asr", AArch64_AM::ASR)
2756  .Case("ror", AArch64_AM::ROR)
2757  .Case("msl", AArch64_AM::MSL)
2758  .Case("uxtb", AArch64_AM::UXTB)
2759  .Case("uxth", AArch64_AM::UXTH)
2760  .Case("uxtw", AArch64_AM::UXTW)
2761  .Case("uxtx", AArch64_AM::UXTX)
2762  .Case("sxtb", AArch64_AM::SXTB)
2763  .Case("sxth", AArch64_AM::SXTH)
2764  .Case("sxtw", AArch64_AM::SXTW)
2765  .Case("sxtx", AArch64_AM::SXTX)
2767 
2768  if (ShOp == AArch64_AM::InvalidShiftExtend)
2769  return MatchOperand_NoMatch;
2770 
2771  SMLoc S = Tok.getLoc();
2772  Parser.Lex();
2773 
2774  bool Hash = parseOptionalToken(AsmToken::Hash);
2775 
2776  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2777  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2778  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2779  ShOp == AArch64_AM::MSL) {
2780  // We expect a number here.
2781  TokError("expected #imm after shift specifier");
2782  return MatchOperand_ParseFail;
2783  }
2784 
2785  // "extend" type operations don't need an immediate, #0 is implicit.
2786  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2787  Operands.push_back(
2788  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2789  return MatchOperand_Success;
2790  }
2791 
2792  // Make sure we do actually have a number, identifier or a parenthesized
2793  // expression.
2794  SMLoc E = Parser.getTok().getLoc();
2795  if (!Parser.getTok().is(AsmToken::Integer) &&
2796  !Parser.getTok().is(AsmToken::LParen) &&
2797  !Parser.getTok().is(AsmToken::Identifier)) {
2798  Error(E, "expected integer shift amount");
2799  return MatchOperand_ParseFail;
2800  }
2801 
2802  const MCExpr *ImmVal;
2803  if (getParser().parseExpression(ImmVal))
2804  return MatchOperand_ParseFail;
2805 
2806  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2807  if (!MCE) {
2808  Error(E, "expected constant '#imm' after shift specifier");
2809  return MatchOperand_ParseFail;
2810  }
2811 
2812  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2813  Operands.push_back(AArch64Operand::CreateShiftExtend(
2814  ShOp, MCE->getValue(), true, S, E, getContext()));
2815  return MatchOperand_Success;
2816 }
2817 
2818 static const struct Extension {
2819  const char *Name;
2821 } ExtensionMap[] = {
2822  {"crc", {AArch64::FeatureCRC}},
2823  {"sm4", {AArch64::FeatureSM4}},
2824  {"sha3", {AArch64::FeatureSHA3}},
2825  {"sha2", {AArch64::FeatureSHA2}},
2826  {"aes", {AArch64::FeatureAES}},
2827  {"crypto", {AArch64::FeatureCrypto}},
2828  {"fp", {AArch64::FeatureFPARMv8}},
2829  {"simd", {AArch64::FeatureNEON}},
2830  {"ras", {AArch64::FeatureRAS}},
2831  {"lse", {AArch64::FeatureLSE}},
2832  {"predres", {AArch64::FeaturePredRes}},
2833  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
2834  {"mte", {AArch64::FeatureMTE}},
2835  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
2836  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
2837  {"ccpp", {AArch64::FeatureCCPP}},
2838  {"sve", {AArch64::FeatureSVE}},
2839  {"sve2", {AArch64::FeatureSVE2}},
2840  {"sve2-aes", {AArch64::FeatureSVE2AES}},
2841  {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
2842  {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
2843  {"bitperm", {AArch64::FeatureSVE2BitPerm}},
2844  // FIXME: Unsupported extensions
2845  {"pan", {}},
2846  {"lor", {}},
2847  {"rdma", {}},
2848  {"profile", {}},
2849 };
2850 
2851 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2852  if (FBS[AArch64::HasV8_1aOps])
2853  Str += "ARMv8.1a";
2854  else if (FBS[AArch64::HasV8_2aOps])
2855  Str += "ARMv8.2a";
2856  else if (FBS[AArch64::HasV8_3aOps])
2857  Str += "ARMv8.3a";
2858  else if (FBS[AArch64::HasV8_4aOps])
2859  Str += "ARMv8.4a";
2860  else if (FBS[AArch64::HasV8_5aOps])
2861  Str += "ARMv8.5a";
2862  else {
2863  auto ext = std::find_if(std::begin(ExtensionMap),
2865  [&](const Extension& e)
2866  // Use & in case multiple features are enabled
2867  { return (FBS & e.Features) != FeatureBitset(); }
2868  );
2869 
2870  Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2871  }
2872 }
2873 
2874 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2875  SMLoc S) {
2876  const uint16_t Op2 = Encoding & 7;
2877  const uint16_t Cm = (Encoding & 0x78) >> 3;
2878  const uint16_t Cn = (Encoding & 0x780) >> 7;
2879  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2880 
2881  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2882 
2883  Operands.push_back(
2884  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2885  Operands.push_back(
2886  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2887  Operands.push_back(
2888  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2889  Expr = MCConstantExpr::create(Op2, getContext());
2890  Operands.push_back(
2891  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2892 }
2893 
2894 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2895 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2896 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2897  OperandVector &Operands) {
2898  if (Name.find('.') != StringRef::npos)
2899  return TokError("invalid operand");
2900 
2901  Mnemonic = Name;
2902  Operands.push_back(
2903  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2904 
2905  MCAsmParser &Parser = getParser();
2906  const AsmToken &Tok = Parser.getTok();
2907  StringRef Op = Tok.getString();
2908  SMLoc S = Tok.getLoc();
2909 
2910  if (Mnemonic == "ic") {
2911  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2912  if (!IC)
2913  return TokError("invalid operand for IC instruction");
2914  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2915  std::string Str("IC " + std::string(IC->Name) + " requires ");
2917  return TokError(Str.c_str());
2918  }
2919  createSysAlias(IC->Encoding, Operands, S);
2920  } else if (Mnemonic == "dc") {
2921  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2922  if (!DC)
2923  return TokError("invalid operand for DC instruction");
2924  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2925  std::string Str("DC " + std::string(DC->Name) + " requires ");
2927  return TokError(Str.c_str());
2928  }
2929  createSysAlias(DC->Encoding, Operands, S);
2930  } else if (Mnemonic == "at") {
2931  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2932  if (!AT)
2933  return TokError("invalid operand for AT instruction");
2934  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2935  std::string Str("AT " + std::string(AT->Name) + " requires ");
2937  return TokError(Str.c_str());
2938  }
2939  createSysAlias(AT->Encoding, Operands, S);
2940  } else if (Mnemonic == "tlbi") {
2941  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2942  if (!TLBI)
2943  return TokError("invalid operand for TLBI instruction");
2944  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2945  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2947  return TokError(Str.c_str());
2948  }
2949  createSysAlias(TLBI->Encoding, Operands, S);
2950  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2951  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2952  if (!PRCTX)
2953  return TokError("invalid operand for prediction restriction instruction");
2954  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2955  std::string Str(
2956  Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2958  return TokError(Str.c_str());
2959  }
2960  uint16_t PRCTX_Op2 =
2961  Mnemonic == "cfp" ? 4 :
2962  Mnemonic == "dvp" ? 5 :
2963  Mnemonic == "cpp" ? 7 :
2964  0;
2965  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
2966  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
2967  }
2968 
2969  Parser.Lex(); // Eat operand.
2970 
2971  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2972  bool HasRegister = false;
2973 
2974  // Check for the optional register operand.
2975  if (parseOptionalToken(AsmToken::Comma)) {
2976  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2977  return TokError("expected register operand");
2978  HasRegister = true;
2979  }
2980 
2981  if (ExpectRegister && !HasRegister)
2982  return TokError("specified " + Mnemonic + " op requires a register");
2983  else if (!ExpectRegister && HasRegister)
2984  return TokError("specified " + Mnemonic + " op does not use a register");
2985 
2986  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2987  return true;
2988 
2989  return false;
2990 }
2991 
2993 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2994  MCAsmParser &Parser = getParser();
2995  const AsmToken &Tok = Parser.getTok();
2996 
2997  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2998  TokError("'csync' operand expected");
2999  return MatchOperand_ParseFail;
3000  // Can be either a #imm style literal or an option name
3001  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3002  // Immediate operand.
3003  const MCExpr *ImmVal;
3004  SMLoc ExprLoc = getLoc();
3005  if (getParser().parseExpression(ImmVal))
3006  return MatchOperand_ParseFail;
3007  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3008  if (!MCE) {
3009  Error(ExprLoc, "immediate value expected for barrier operand");
3010  return MatchOperand_ParseFail;
3011  }
3012  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
3013  Error(ExprLoc, "barrier operand out of range");
3014  return MatchOperand_ParseFail;
3015  }
3016  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3017  Operands.push_back(AArch64Operand::CreateBarrier(
3018  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3019  return MatchOperand_Success;
3020  }
3021 
3022  if (Tok.isNot(AsmToken::Identifier)) {
3023  TokError("invalid operand for instruction");
3024  return MatchOperand_ParseFail;
3025  }
3026 
3027  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3028  // The only valid named option for ISB is 'sy'
3029  auto DB = AArch64DB::lookupDBByName(Tok.getString());
3030  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3031  TokError("'sy' or #imm operand expected");
3032  return MatchOperand_ParseFail;
3033  // The only valid named option for TSB is 'csync'
3034  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3035  TokError("'csync' operand expected");
3036  return MatchOperand_ParseFail;
3037  } else if (!DB && !TSB) {
3038  TokError("invalid barrier option name");
3039  return MatchOperand_ParseFail;
3040  }
3041 
3042  Operands.push_back(AArch64Operand::CreateBarrier(
3043  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3044  Parser.Lex(); // Consume the option
3045 
3046  return MatchOperand_Success;
3047 }
3048 
3050 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3051  MCAsmParser &Parser = getParser();
3052  const AsmToken &Tok = Parser.getTok();
3053 
3054  if (Tok.isNot(AsmToken::Identifier))
3055  return MatchOperand_NoMatch;
3056 
3057  int MRSReg, MSRReg;
3058  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3059  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3060  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3061  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3062  } else
3063  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3064 
3065  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3066  unsigned PStateImm = -1;
3067  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3068  PStateImm = PState->Encoding;
3069 
3070  Operands.push_back(
3071  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3072  PStateImm, getContext()));
3073  Parser.Lex(); // Eat identifier
3074 
3075  return MatchOperand_Success;
3076 }
3077 
3078 /// tryParseNeonVectorRegister - Parse a vector register operand.
3079 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3080  MCAsmParser &Parser = getParser();
3081  if (Parser.getTok().isNot(AsmToken::Identifier))
3082  return true;
3083 
3084  SMLoc S = getLoc();
3085  // Check for a vector register specifier first.
3086  StringRef Kind;
3087  unsigned Reg;
3088  OperandMatchResultTy Res =
3089  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3090  if (Res != MatchOperand_Success)
3091  return true;
3092 
3093  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3094  if (!KindRes)
3095  return true;
3096 
3097  unsigned ElementWidth = KindRes->second;
3098  Operands.push_back(
3099  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3100  S, getLoc(), getContext()));
3101 
3102  // If there was an explicit qualifier, that goes on as a literal text
3103  // operand.
3104  if (!Kind.empty())
3105  Operands.push_back(
3106  AArch64Operand::CreateToken(Kind, false, S, getContext()));
3107 
3108  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3109 }
3110 
3112 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3113  SMLoc SIdx = getLoc();
3114  if (parseOptionalToken(AsmToken::LBrac)) {
3115  const MCExpr *ImmVal;
3116  if (getParser().parseExpression(ImmVal))
3117  return MatchOperand_NoMatch;
3118  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3119  if (!MCE) {
3120  TokError("immediate value expected for vector index");
3121  return MatchOperand_ParseFail;;
3122  }
3123 
3124  SMLoc E = getLoc();
3125 
3126  if (parseToken(AsmToken::RBrac, "']' expected"))
3127  return MatchOperand_ParseFail;;
3128 
3129  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3130  E, getContext()));
3131  return MatchOperand_Success;
3132  }
3133 
3134  return MatchOperand_NoMatch;
3135 }
3136 
3137 // tryParseVectorRegister - Try to parse a vector register name with
3138 // optional kind specifier. If it is a register specifier, eat the token
3139 // and return it.
3141 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3142  RegKind MatchKind) {
3143  MCAsmParser &Parser = getParser();
3144  const AsmToken &Tok = Parser.getTok();
3145 
3146  if (Tok.isNot(AsmToken::Identifier))
3147  return MatchOperand_NoMatch;
3148 
3149  StringRef Name = Tok.getString();
3150  // If there is a kind specifier, it's separated from the register name by
3151  // a '.'.
3152  size_t Start = 0, Next = Name.find('.');
3153  StringRef Head = Name.slice(Start, Next);
3154  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3155 
3156  if (RegNum) {
3157  if (Next != StringRef::npos) {
3158  Kind = Name.slice(Next, StringRef::npos);
3159  if (!isValidVectorKind(Kind, MatchKind)) {
3160  TokError("invalid vector kind qualifier");
3161  return MatchOperand_ParseFail;
3162  }
3163  }
3164  Parser.Lex(); // Eat the register token.
3165 
3166  Reg = RegNum;
3167  return MatchOperand_Success;
3168  }
3169 
3170  return MatchOperand_NoMatch;
3171 }
3172 
3173 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3175 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3176  // Check for a SVE predicate register specifier first.
3177  const SMLoc S = getLoc();
3178  StringRef Kind;
3179  unsigned RegNum;
3180  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3181  if (Res != MatchOperand_Success)
3182  return Res;
3183 
3184  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3185  if (!KindRes)
3186  return MatchOperand_NoMatch;
3187 
3188  unsigned ElementWidth = KindRes->second;
3189  Operands.push_back(AArch64Operand::CreateVectorReg(
3190  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3191  getLoc(), getContext()));
3192 
3193  // Not all predicates are followed by a '/m' or '/z'.
3194  MCAsmParser &Parser = getParser();
3195  if (Parser.getTok().isNot(AsmToken::Slash))
3196  return MatchOperand_Success;
3197 
3198  // But when they do they shouldn't have an element type suffix.
3199  if (!Kind.empty()) {
3200  Error(S, "not expecting size suffix");
3201  return MatchOperand_ParseFail;
3202  }
3203 
3204  // Add a literal slash as operand
3205  Operands.push_back(
3206  AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3207 
3208  Parser.Lex(); // Eat the slash.
3209 
3210  // Zeroing or merging?
3211  auto Pred = Parser.getTok().getString().lower();
3212  if (Pred != "z" && Pred != "m") {
3213  Error(getLoc(), "expecting 'm' or 'z' predication");
3214  return MatchOperand_ParseFail;
3215  }
3216 
3217  // Add zero/merge token.
3218  const char *ZM = Pred == "z" ? "z" : "m";
3219  Operands.push_back(
3220  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3221 
3222  Parser.Lex(); // Eat zero/merge token.
3223  return MatchOperand_Success;
3224 }
3225 
3226 /// parseRegister - Parse a register operand.
3227 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3228  // Try for a Neon vector register.
3229  if (!tryParseNeonVectorRegister(Operands))
3230  return false;
3231 
3232  // Otherwise try for a scalar register.
3233  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3234  return false;
3235 
3236  return true;
3237 }
3238 
// Parse an immediate expression that may carry a leading ":modifier:"
// relocation specifier (e.g. ":lo12:sym"); when one is present the resulting
// expression is wrapped in an AArch64MCExpr with the matching VariantKind.
// Returns true (after emitting a diagnostic) on failure.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  // NOTE(review): the declaration of RefKind (presumably
  // "AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;")
  // appears to be missing from this excerpt — confirm against upstream.

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Modifier names are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    // NOTE(review): this StringSwitch looks truncated in this excerpt — a few
    // .Case entries and the trailing .Default(VK_INVALID) seem to be missing.
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the parsed expression with the relocation modifier, if any.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3308 
3309 template <RegKind VectorKind>
3311 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3312  bool ExpectMatch) {
3313  MCAsmParser &Parser = getParser();
3314  if (!Parser.getTok().is(AsmToken::LCurly))
3315  return MatchOperand_NoMatch;
3316 
3317  // Wrapper around parse function
3318  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3319  bool NoMatchIsError) {
3320  auto RegTok = Parser.getTok();
3321  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3322  if (ParseRes == MatchOperand_Success) {
3323  if (parseVectorKind(Kind, VectorKind))
3324  return ParseRes;
3325  llvm_unreachable("Expected a valid vector kind");
3326  }
3327 
3328  if (RegTok.isNot(AsmToken::Identifier) ||
3329  ParseRes == MatchOperand_ParseFail ||
3330  (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3331  Error(Loc, "vector register expected");
3332  return MatchOperand_ParseFail;
3333  }
3334 
3335  return MatchOperand_NoMatch;
3336  };
3337 
3338  SMLoc S = getLoc();
3339  auto LCurly = Parser.getTok();
3340  Parser.Lex(); // Eat left bracket token.
3341 
3342  StringRef Kind;
3343  unsigned FirstReg;
3344  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3345 
3346  // Put back the original left bracket if there was no match, so that
3347  // different types of list-operands can be matched (e.g. SVE, Neon).
3348  if (ParseRes == MatchOperand_NoMatch)
3349  Parser.getLexer().UnLex(LCurly);
3350 
3351  if (ParseRes != MatchOperand_Success)
3352  return ParseRes;
3353 
3354  int64_t PrevReg = FirstReg;
3355  unsigned Count = 1;
3356 
3357  if (parseOptionalToken(AsmToken::Minus)) {
3358  SMLoc Loc = getLoc();
3359  StringRef NextKind;
3360 
3361  unsigned Reg;
3362  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3363  if (ParseRes != MatchOperand_Success)
3364  return ParseRes;
3365 
3366  // Any Kind suffices must match on all regs in the list.
3367  if (Kind != NextKind) {
3368  Error(Loc, "mismatched register size suffix");
3369  return MatchOperand_ParseFail;
3370  }
3371 
3372  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3373 
3374  if (Space == 0 || Space > 3) {
3375  Error(Loc, "invalid number of vectors");
3376  return MatchOperand_ParseFail;
3377  }
3378 
3379  Count += Space;
3380  }
3381  else {
3382  while (parseOptionalToken(AsmToken::Comma)) {
3383  SMLoc Loc = getLoc();
3384  StringRef NextKind;
3385  unsigned Reg;
3386  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3387  if (ParseRes != MatchOperand_Success)
3388  return ParseRes;
3389 
3390  // Any Kind suffices must match on all regs in the list.
3391  if (Kind != NextKind) {
3392  Error(Loc, "mismatched register size suffix");
3393  return MatchOperand_ParseFail;
3394  }
3395 
3396  // Registers must be incremental (with wraparound at 31)
3397  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3398  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3399  Error(Loc, "registers must be sequential");
3400  return MatchOperand_ParseFail;
3401  }
3402 
3403  PrevReg = Reg;
3404  ++Count;
3405  }
3406  }
3407 
3408  if (parseToken(AsmToken::RCurly, "'}' expected"))
3409  return MatchOperand_ParseFail;
3410 
3411  if (Count > 4) {
3412  Error(S, "invalid number of vectors");
3413  return MatchOperand_ParseFail;
3414  }
3415 
3416  unsigned NumElements = 0;
3417  unsigned ElementWidth = 0;
3418  if (!Kind.empty()) {
3419  if (const auto &VK = parseVectorKind(Kind, VectorKind))
3420  std::tie(NumElements, ElementWidth) = *VK;
3421  }
3422 
3423  Operands.push_back(AArch64Operand::CreateVectorList(
3424  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3425  getContext()));
3426 
3427  return MatchOperand_Success;
3428 }
3429 
3430 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3431 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3432  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3433  if (ParseRes != MatchOperand_Success)
3434  return true;
3435 
3436  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3437 }
3438 
3440 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3441  SMLoc StartLoc = getLoc();
3442 
3443  unsigned RegNum;
3444  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3445  if (Res != MatchOperand_Success)
3446  return Res;
3447 
3448  if (!parseOptionalToken(AsmToken::Comma)) {
3449  Operands.push_back(AArch64Operand::CreateReg(
3450  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3451  return MatchOperand_Success;
3452  }
3453 
3454  parseOptionalToken(AsmToken::Hash);
3455 
3456  if (getParser().getTok().isNot(AsmToken::Integer)) {
3457  Error(getLoc(), "index must be absent or #0");
3458  return MatchOperand_ParseFail;
3459  }
3460 
3461  const MCExpr *ImmVal;
3462  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3463  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3464  Error(getLoc(), "index must be absent or #0");
3465  return MatchOperand_ParseFail;
3466  }
3467 
3468  Operands.push_back(AArch64Operand::CreateReg(
3469  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3470  return MatchOperand_Success;
3471 }
3472 
3473 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3475 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3476  SMLoc StartLoc = getLoc();
3477 
3478  unsigned RegNum;
3479  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3480  if (Res != MatchOperand_Success)
3481  return Res;
3482 
3483  // No shift/extend is the default.
3484  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3485  Operands.push_back(AArch64Operand::CreateReg(
3486  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3487  return MatchOperand_Success;
3488  }
3489 
3490  // Eat the comma
3491  getParser().Lex();
3492 
3493  // Match the shift
3495  Res = tryParseOptionalShiftExtend(ExtOpnd);
3496  if (Res != MatchOperand_Success)
3497  return Res;
3498 
3499  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3500  Operands.push_back(AArch64Operand::CreateReg(
3501  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3502  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3503  Ext->hasShiftExtendAmount()));
3504 
3505  return MatchOperand_Success;
3506 }
3507 
3508 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3509  MCAsmParser &Parser = getParser();
3510 
3511  // Some SVE instructions have a decoration after the immediate, i.e.
3512  // "mul vl". We parse them here and add tokens, which must be present in the
3513  // asm string in the tablegen instruction.
3514  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3515  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3516  if (!Parser.getTok().getString().equals_lower("mul") ||
3517  !(NextIsVL || NextIsHash))
3518  return true;
3519 
3520  Operands.push_back(
3521  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3522  Parser.Lex(); // Eat the "mul"
3523 
3524  if (NextIsVL) {
3525  Operands.push_back(
3526  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3527  Parser.Lex(); // Eat the "vl"
3528  return false;
3529  }
3530 
3531  if (NextIsHash) {
3532  Parser.Lex(); // Eat the #
3533  SMLoc S = getLoc();
3534 
3535  // Parse immediate operand.
3536  const MCExpr *ImmVal;
3537  if (!Parser.parseExpression(ImmVal))
3538  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3539  Operands.push_back(AArch64Operand::CreateImm(
3540  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3541  getContext()));
3542  return MatchOperand_Success;
3543  }
3544  }
3545 
3546  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3547 }
3548 
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
/// Returns false on success, true on failure (after emitting a diagnostic).
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First try the tablegen'erated per-operand custom parsers.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as an expression immediate (possibly with a
    // leading ":modifier:" relocation specifier).
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    // '{' starts a vector register list.
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE(review): this returns the enum as bool, apparently relying on
    // MatchOperand_Success converting to false — confirm enumerator values.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two literal token operands.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Strip trailing 16-bit chunks of zeros; they become the LSL amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                                                               ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3720 
3721 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3722  const MCParsedAsmOperand &Op2) const {
3723  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3724  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3725  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3726  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3727  return MCTargetAsmParser::regsEqual(Op1, Op2);
3728 
3729  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3730  "Testing equality of non-scalar registers not supported");
3731 
3732  // Check if a registers match their sub/super register classes.
3733  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3734  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3735  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3736  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3737  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3738  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3739  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3740  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3741 
3742  return false;
3743 }
3744 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Canonicalizes deprecated "b<cond>" spellings to "b.<cond>", handles the
/// AArch64-specific ".req" directive, routes cache/TLB maintenance mnemonics
/// to the SYS-alias parser, splits the mnemonic on '.' into sub-tokens, and
/// then reads the comma-separated operand list (including the notional ']'
/// and '!' memory-operand tokens that are not comma-separated).
///
/// Returns true on error (a diagnostic has already been emitted), false on
/// success.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize the condensed "b<cond>" branch aliases to "b.<cond>" so the
  // condition-code suffix handling below only has to deal with one form.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  // Next == npos (i.e. no '.') leaves Head as the whole mnemonic.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    // Start + 1 skips the '.' itself, so Head is the bare condition name.
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic. Unlike the branch case above,
  // each suffix token here retains its leading '.'; the reported location is
  // advanced by one to point past that dot.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The second argument tells
      // parseOperand whether operand N is a condition-code position for this
      // mnemonic; the third whether the cset/cinc-style inversion applies.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3880 
3881 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3882  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3883  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3884  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3885  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3886  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3887  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3888  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3889 }
3890 
3891 // FIXME: This entire function is a giant hack to provide us with decent
3892 // operand range validation/diagnostics until TableGen/MC can be extended
3893 // to support autogeneration of this kind of validation.
3894 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3895  SmallVectorImpl<SMLoc> &Loc) {
3896  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3897  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3898 
3899  // A prefix only applies to the instruction following it. Here we extract
3900  // prefix information for the next instruction before validating the current
3901  // one so that in the case of failure we don't erronously continue using the
3902  // current prefix.
3903  PrefixInfo Prefix = NextPrefix;
3904  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3905 
3906  // Before validating the instruction in isolation we run through the rules
3907  // applicable when it follows a prefix instruction.
3908  // NOTE: brk & hlt can be prefixed but require no additional validation.
3909  if (Prefix.isActive() &&
3910  (Inst.getOpcode() != AArch64::BRK) &&
3911  (Inst.getOpcode() != AArch64::HLT)) {
3912 
3913  // Prefixed intructions must have a destructive operand.
3916  return Error(IDLoc, "instruction is unpredictable when following a"
3917  " movprfx, suggest replacing movprfx with mov");
3918 
3919  // Destination operands must match.
3920  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3921  return Error(Loc[0], "instruction is unpredictable when following a"
3922  " movprfx writing to a different destination");
3923 
3924  // Destination operand must not be used in any other location.
3925  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3926  if (Inst.getOperand(i).isReg() &&
3927  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3928  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3929  return Error(Loc[0], "instruction is unpredictable when following a"
3930  " movprfx and destination also used as non-destructive"
3931  " source");
3932  }
3933 
3934  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3935  if (Prefix.isPredicated()) {
3936  int PgIdx = -1;
3937 
3938  // Find the instructions general predicate.
3939  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3940  if (Inst.getOperand(i).isReg() &&
3941  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3942  PgIdx = i;
3943  break;
3944  }
3945 
3946  // Instruction must be predicated if the movprfx is predicated.
3947  if (PgIdx == -1 ||
3949  return Error(IDLoc, "instruction is unpredictable when following a"
3950  " predicated movprfx, suggest using unpredicated movprfx");
3951 
3952  // Instruction must use same general predicate as the movprfx.
3953  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3954  return Error(IDLoc, "instruction is unpredictable when following a"
3955  " predicated movprfx using a different general predicate");
3956 
3957  // Instruction element type must match the movprfx.
3958  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3959  return Error(IDLoc, "instruction is unpredictable when following a"
3960  " predicated movprfx with a different element size");
3961  }
3962  }
3963 
3964  // Check for indexed addressing modes w/ the base register being the
3965  // same as a destination/source register or pair load where
3966  // the Rt == Rt2. All of those are undefined behaviour.
3967  switch (Inst.getOpcode()) {
3968  case AArch64::LDPSWpre:
3969  case AArch64::LDPWpost:
3970  case AArch64::LDPWpre:
3971  case AArch64::LDPXpost:
3972  case AArch64::LDPXpre: {
3973  unsigned Rt = Inst.getOperand(1).getReg();
3974  unsigned Rt2 = Inst.getOperand(2).getReg();
3975  unsigned Rn = Inst.getOperand(3).getReg();
3976  if (RI->isSubRegisterEq(Rn, Rt))
3977  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3978  "is also a destination");
3979  if (RI->isSubRegisterEq(Rn, Rt2))
3980  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3981  "is also a destination");
3983  }
3984  case AArch64::LDPDi:
3985  case AArch64::LDPQi:
3986  case AArch64::LDPSi:
3987  case AArch64::LDPSWi:
3988  case AArch64::LDPWi:
3989  case AArch64::LDPXi: {
3990  unsigned Rt = Inst.getOperand(0).getReg();
3991  unsigned Rt2 = Inst.getOperand(1).getReg();
3992  if (Rt == Rt2)
3993  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3994  break;
3995  }
3996  case AArch64::LDPDpost:
3997  case AArch64::LDPDpre:
3998  case AArch64::LDPQpost:
3999  case AArch64::LDPQpre:
4000  case AArch64::LDPSpost:
4001  case AArch64::LDPSpre:
4002  case AArch64::LDPSWpost: {
4003  unsigned Rt = Inst.getOperand(1).getReg();
4004  unsigned Rt2 = Inst.getOperand(2).getReg();
4005  if (Rt == Rt2)
4006  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4007  break;
4008  }
4009  case AArch64::STPDpost:
4010  case AArch64::STPDpre:
4011  case AArch64::STPQpost:
4012  case AArch64::STPQpre:
4013  case AArch64::STPSpost:
4014  case AArch64::STPSpre:
4015  case AArch64::STPWpost:
4016  case AArch64::STPWpre:
4017  case AArch64::STPXpost:
4018  case AArch64::STPXpre: {
4019  unsigned Rt = Inst.getOperand(1).getReg();
4020  unsigned Rt2 = Inst.getOperand(2).getReg();
4021  unsigned Rn = Inst.getOperand(3).getReg();
4022  if (RI->isSubRegisterEq(Rn, Rt))
4023  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4024  "is also a source");
4025  if (RI->isSubRegisterEq(Rn, Rt2))
4026  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4027  "is also a source");
4028  break;
4029  }
4030  case AArch64::LDRBBpre:
4031  case AArch64::LDRBpre:
4032  case AArch64::LDRHHpre:
4033  case AArch64::LDRHpre:
4034  case AArch64::LDRSBWpre:
4035  case AArch64::LDRSBXpre:
4036  case AArch64::LDRSHWpre:
4037  case AArch64::LDRSHXpre:
4038  case AArch64::LDRSWpre:
4039  case AArch64::LDRWpre:
4040  case AArch64::LDRXpre:
4041  case AArch64::LDRBBpost:
4042  case AArch64::LDRBpost:
4043  case AArch64::LDRHHpost:
4044  case AArch64::LDRHpost:
4045  case AArch64::LDRSBWpost:
4046  case AArch64::LDRSBXpost:
4047  case AArch64::LDRSHWpost:
4048  case AArch64::LDRSHXpost:
4049  case AArch64::LDRSWpost:
4050  case AArch64::LDRWpost:
4051  case AArch64::LDRXpost: {
4052  unsigned Rt = Inst.getOperand(1).getReg();
4053  unsigned Rn = Inst.getOperand(2).getReg();
4054  if (RI->isSubRegisterEq(Rn, Rt))
4055  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4056  "is also a source");
4057  break;
4058  }
4059  case AArch64::STRBBpost:
4060  case AArch64::STRBpost:
4061  case AArch64::STRHHpost:
4062  case AArch64::STRHpost:
4063  case AArch64::STRWpost:
4064  case AArch64::STRXpost:
4065  case AArch64::STRBBpre:
4066  case AArch64::STRBpre:
4067  case AArch64::STRHHpre:
4068  case AArch64::STRHpre:
4069  case AArch64::STRWpre:
4070  case AArch64::STRXpre: {
4071  unsigned Rt = Inst.getOperand(1).getReg();
4072  unsigned Rn = Inst.getOperand(2).getReg();
4073  if (RI->isSubRegisterEq(Rn, Rt))
4074  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4075  "is also a source");
4076  break;
4077  }
4078  case AArch64::STXRB:
4079  case AArch64::STXRH:
4080  case AArch64::STXRW:
4081  case AArch64::STXRX:
4082  case AArch64::STLXRB:
4083  case AArch64::STLXRH:
4084  case AArch64::STLXRW:
4085  case AArch64::STLXRX: {
4086  unsigned Rs = Inst.getOperand(0).getReg();
4087  unsigned Rt = Inst.getOperand(1).getReg();
4088  unsigned Rn = Inst.getOperand(2).getReg();
4089  if (RI->isSubRegisterEq(Rt, Rs) ||
4090  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4091  return Error(Loc[0],
4092  "unpredictable STXR instruction, status is also a source");
4093  break;
4094  }
4095  case AArch64::STXPW:
4096  case AArch64::STXPX:
4097  case AArch64::STLXPW:
4098  case AArch64::STLXPX: {
4099  unsigned Rs = Inst.getOperand(0).getReg();
4100  unsigned Rt1 = Inst.getOperand(1).getReg();
4101  unsigned Rt2 = Inst.getOperand(2).getReg();
4102  unsigned Rn = Inst.getOperand(3).getReg();
4103  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4104  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4105  return Error(Loc[0],
4106  "unpredictable STXP instruction, status is also a source");
4107  break;
4108  }
4109  }
4110 
4111 
4112  // Now check immediate ranges. Separate from the above as there is overlap
4113  // in the instructions being checked and this keeps the nested conditionals
4114  // to a minimum.
4115  switch (Inst.getOpcode()) {
4116  case AArch64::ADDSWri:
4117  case AArch64::ADDSXri:
4118  case AArch64::ADDWri:
4119  case AArch64::ADDXri:
4120  case AArch64::SUBSWri:
4121  case AArch64::SUBSXri:
4122  case AArch64::SUBWri:
4123  case AArch64::SUBXri: {
4124  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4125  // some slight duplication here.
4126  if (Inst.getOperand(2).isExpr()) {
4127  const MCExpr *Expr = Inst.getOperand(2).getExpr();
4128  AArch64MCExpr::VariantKind ELFRefKind;
4129  MCSymbolRefExpr::VariantKind DarwinRefKind;
4130  int64_t Addend;
4131  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4132 
4133  // Only allow these with ADDXri.
4134  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4135  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4136  Inst.getOpcode() == AArch64::ADDXri)
4137  return false;
4138 
4139  // Only allow these with ADDXri/ADDWri
4140  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4141  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4142  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4143  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4144  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4145  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4146  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4147  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4148  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4149  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4150  (Inst.getOpcode() == AArch64::ADDXri ||
4151  Inst.getOpcode() == AArch64::ADDWri))
4152  return false;
4153 
4154  // Don't allow symbol refs in the immediate field otherwise
4155  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4156  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4157  // 'cmp w0, 'borked')
4158  return Error(Loc.back(), "invalid immediate expression");
4159  }
4160  // We don't validate more complex expressions here
4161  }
4162  return false;
4163  }
4164  default:
4165  return false;
4166  }
4167 }
4168 
4169 static std::string AArch64MnemonicSpellCheck(StringRef S,
4170  const FeatureBitset &FBS,
4171  unsigned VariantID = 0);
4172 
4173 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4174  uint64_t ErrorInfo,
4175  OperandVector &Operands) {
4176  switch (ErrCode) {
4177  case Match_InvalidTiedOperand: {
4179  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4180  .getRegEqualityTy();
4181  switch (EqTy) {
4182  case RegConstraintEqualityTy::EqualsSubReg:
4183  return Error(Loc, "operand must be 64-bit form of destination register");
4184  case RegConstraintEqualityTy::EqualsSuperReg:
4185  return Error(Loc, "operand must be 32-bit form of destination register");
4186  case RegConstraintEqualityTy::EqualsReg:
4187  return Error(Loc, "operand must match destination register");
4188  }
4189  llvm_unreachable("Unknown RegConstraintEqualityTy");
4190  }
4191  case Match_MissingFeature:
4192  return Error(Loc,
4193  "instruction requires a CPU feature not currently enabled");
4194  case Match_InvalidOperand:
4195  return Error(Loc, "invalid operand for instruction");
4196  case Match_InvalidSuffix:
4197  return Error(Loc, "invalid type suffix for instruction");
4198  case Match_InvalidCondCode:
4199  return Error(Loc, "expected AArch64 condition code");
4200  case Match_AddSubRegExtendSmall:
4201  return Error(Loc,
4202  "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4203  case Match_AddSubRegExtendLarge:
4204  return Error(Loc,
4205  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4206  case Match_AddSubSecondSource:
4207  return Error(Loc,
4208  "expected compatible register, symbol or integer in range [0, 4095]");
4209  case Match_LogicalSecondSource:
4210  return Error(Loc, "expected compatible register or logical immediate");
4211  case Match_InvalidMovImm32Shift:
4212  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4213  case Match_InvalidMovImm64Shift:
4214  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4215  case Match_AddSubRegShift32:
4216  return Error(Loc,
4217  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4218  case Match_AddSubRegShift64:
4219  return Error(Loc,
4220  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4221  case Match_InvalidFPImm:
4222  return Error(Loc,
4223  "expected compatible register or floating-point constant");
4224  case Match_InvalidMemoryIndexedSImm6:
4225  return Error(Loc, "index must be an integer in range [-32, 31].");
4226  case Match_InvalidMemoryIndexedSImm5:
4227  return Error(Loc, "index must be an integer in range [-16, 15].");
4228  case Match_InvalidMemoryIndexed1SImm4:
4229  return Error(Loc, "index must be an integer in range [-8, 7].");
4230  case Match_InvalidMemoryIndexed2SImm4:
4231  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4232  case Match_InvalidMemoryIndexed3SImm4:
4233  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4234  case Match_InvalidMemoryIndexed4SImm4:
4235  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4236  case Match_InvalidMemoryIndexed16SImm4:
4237  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4238  case Match_InvalidMemoryIndexed1SImm6:
4239  return Error(Loc, "index must be an integer in range [-32, 31].");
4240  case Match_InvalidMemoryIndexedSImm8:
4241  return Error(Loc, "index must be an integer in range [-128, 127].");
4242  case Match_InvalidMemoryIndexedSImm9:
4243  return Error(Loc, "index must be an integer in range [-256, 255].");
4244  case Match_InvalidMemoryIndexed16SImm9:
4245  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4246  case Match_InvalidMemoryIndexed8SImm10:
4247  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4248  case Match_InvalidMemoryIndexed4SImm7:
4249  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4250  case Match_InvalidMemoryIndexed8SImm7:
4251  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4252  case Match_InvalidMemoryIndexed16SImm7:
4253  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4254  case Match_InvalidMemoryIndexed8UImm5:
4255  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4256  case Match_InvalidMemoryIndexed4UImm5:
4257  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4258  case Match_InvalidMemoryIndexed2UImm5:
4259  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4260  case Match_InvalidMemoryIndexed8UImm6:
4261  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4262  case Match_InvalidMemoryIndexed16UImm6:
4263  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4264  case Match_InvalidMemoryIndexed4UImm6:
4265  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4266  case Match_InvalidMemoryIndexed2UImm6:
4267  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4268  case Match_InvalidMemoryIndexed1UImm6:
4269  return Error(Loc, "index must be in range [0, 63].");
4270  case Match_InvalidMemoryWExtend8:
4271  return Error(Loc,
4272  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4273  case Match_InvalidMemoryWExtend16:
4274  return Error(Loc,
4275  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4276  case Match_InvalidMemoryWExtend32:
4277  return Error(Loc,
4278  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4279  case Match_InvalidMemoryWExtend64:
4280  return Error(Loc,
4281  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4282  case Match_InvalidMemoryWExtend128:
4283  return Error(Loc,
4284  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4285  case Match_InvalidMemoryXExtend8:
4286  return Error(Loc,
4287  "expected 'lsl' or 'sxtx' with optional shift of #0");
4288  case Match_InvalidMemoryXExtend16:
4289  return Error(Loc,
4290  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4291  case Match_InvalidMemoryXExtend32:
4292  return Error(Loc,
4293  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4294  case Match_InvalidMemoryXExtend64:
4295  return Error(Loc,
4296  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4297  case Match_InvalidMemoryXExtend128:
4298  return Error(Loc,
4299  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4300  case Match_InvalidMemoryIndexed1:
4301  return Error(Loc, "index must be an integer in range [0, 4095].");
4302  case Match_InvalidMemoryIndexed2:
4303  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4304  case Match_InvalidMemoryIndexed4:
4305  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4306  case Match_InvalidMemoryIndexed8:
4307  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4308  case Match_InvalidMemoryIndexed16:
4309  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4310  case Match_InvalidImm0_1:
4311  return Error(Loc, "immediate must be an integer in range [0, 1].");
4312  case Match_InvalidImm0_7:
4313  return Error(Loc, "immediate must be an integer in range [0, 7].");
4314  case Match_InvalidImm0_15:
4315  return Error(Loc, "immediate must be an integer in range [0, 15].");
4316  case Match_InvalidImm0_31:
4317  return Error(Loc, "immediate must be an integer in range [0, 31].");
4318  case Match_InvalidImm0_63:
4319  return Error(Loc, "immediate must be an integer in range [0, 63].");
4320  case Match_InvalidImm0_127:
4321  return Error(Loc, "immediate must be an integer in range [0, 127].");
4322  case Match_InvalidImm0_255:
4323  return Error(Loc, "immediate must be an integer in range [0, 255].");
4324  case Match_InvalidImm0_65535:
4325  return Error(Loc, "immediate must be an integer in range [0, 65535].");
4326  case Match_InvalidImm1_8:
4327  return Error(Loc, "immediate must be an integer in range [1, 8].");
4328  case Match_InvalidImm1_16:
4329  return Error(Loc, "immediate must be an integer in range [1, 16].");
4330  case Match_InvalidImm1_32:
4331  return Error(Loc, "immediate must be an integer in range [1, 32].");
4332  case Match_InvalidImm1_64:
4333  return Error(Loc, "immediate must be an integer in range [1, 64].");
4334  case Match_InvalidSVEAddSubImm8:
4335  return Error(Loc, "immediate must be an integer in range [0, 255]"
4336  " with a shift amount of 0");
4337  case Match_InvalidSVEAddSubImm16:
4338  case Match_InvalidSVEAddSubImm32:
4339  case Match_InvalidSVEAddSubImm64:
4340  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4341  "multiple of 256 in range [256, 65280]");
4342  case Match_InvalidSVECpyImm8:
4343  return Error(Loc, "immediate must be an integer in range [-128, 255]"
4344  " with a shift amount of 0");
4345  case Match_InvalidSVECpyImm16:
4346  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4347  "multiple of 256 in range [-32768, 65280]");
4348  case Match_InvalidSVECpyImm32:
4349  case Match_InvalidSVECpyImm64:
4350  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4351  "multiple of 256 in range [-32768, 32512]");
4352  case Match_InvalidIndexRange1_1:
4353  return Error(Loc, "expected lane specifier '[1]'");
4354  case Match_InvalidIndexRange0_15:
4355  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4356  case Match_InvalidIndexRange0_7:
4357  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4358  case Match_InvalidIndexRange0_3:
4359  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4360  case Match_InvalidIndexRange0_1:
4361  return Error(Loc, "vector lane must be an integer in range [0, 1].");
4362  case Match_InvalidSVEIndexRange0_63:
4363  return Error(Loc, "vector lane must be an integer in range [0, 63].");
4364  case Match_InvalidSVEIndexRange0_31:
4365  return Error(Loc, "vector lane must be an integer in range [0, 31].");
4366  case Match_InvalidSVEIndexRange0_15:
4367  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4368  case Match_InvalidSVEIndexRange0_7:
4369  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4370  case Match_InvalidSVEIndexRange0_3:
4371  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4372  case Match_InvalidLabel:
4373  return Error(Loc, "expected label or encodable integer pc offset");
4374  case Match_MRS:
4375  return Error(Loc, "expected readable system register");
4376  case Match_MSR:
4377  return Error(Loc, "expected writable system register or pstate");
4378  case Match_InvalidComplexRotationEven:
4379  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4380  case Match_InvalidComplexRotationOdd:
4381  return Error(Loc, "complex rotation must be 90 or 270.");
4382  case Match_MnemonicFail: {
4383  std::string Suggestion = AArch64MnemonicSpellCheck(
4384  ((AArch64Operand &)*Operands[0]).getToken(),
4385  ComputeAvailableFeatures(STI->getFeatureBits()));
4386  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4387  }
4388  case Match_InvalidGPR64shifted8:
4389  return Error(Loc, "register must be x0..x30 or xzr, without shift");
4390  case Match_InvalidGPR64shifted16:
4391  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4392  case Match_InvalidGPR64shifted32:
4393  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4394  case Match_InvalidGPR64shifted64:
4395  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4396  case Match_InvalidGPR64NoXZRshifted8:
4397  return Error(Loc, "register must be x0..x30 without shift");
4398  case Match_InvalidGPR64NoXZRshifted16:
4399  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4400  case Match_InvalidGPR64NoXZRshifted32:
4401  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4402  case Match_InvalidGPR64NoXZRshifted64:
4403  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4404  case Match_InvalidZPR32UXTW8:
4405  case Match_InvalidZPR32SXTW8:
4406  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4407  case Match_InvalidZPR32UXTW16:
4408  case Match_InvalidZPR32SXTW16:
4409  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4410  case Match_InvalidZPR32UXTW32:
4411  case Match_InvalidZPR32SXTW32:
4412  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4413  case Match_InvalidZPR32UXTW64:
4414  case Match_InvalidZPR32SXTW64:
4415  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4416  case Match_InvalidZPR64UXTW8:
4417  case Match_InvalidZPR64SXTW8:
4418  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4419  case Match_InvalidZPR64UXTW16:
4420  case Match_InvalidZPR64SXTW16:
4421  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4422  case Match_InvalidZPR64UXTW32:
4423  case Match_InvalidZPR64SXTW32:
4424  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4425  case Match_InvalidZPR64UXTW64:
4426  case Match_InvalidZPR64SXTW64:
4427  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4428  case Match_InvalidZPR32LSL8:
4429  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4430  case Match_InvalidZPR32LSL16:
4431  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4432  case Match_InvalidZPR32LSL32:
4433  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4434  case Match_InvalidZPR32LSL64:
4435  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4436  case Match_InvalidZPR64LSL8:
4437  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4438  case Match_InvalidZPR64LSL16:
4439  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4440  case Match_InvalidZPR64LSL32:
4441  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4442  case Match_InvalidZPR64LSL64:
4443  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4444  case Match_InvalidZPR0:
4445  return Error(Loc, "expected register without element width suffix");
4446  case Match_InvalidZPR8:
4447  case Match_InvalidZPR16:
4448  case Match_InvalidZPR32:
4449  case Match_InvalidZPR64:
4450  case Match_InvalidZPR128:
4451  return Error(Loc, "invalid element width");
4452  case Match_InvalidZPR_3b8:
4453  return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4454  case Match_InvalidZPR_3b16:
4455  return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4456  case Match_InvalidZPR_3b32:
4457  return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4458  case Match_InvalidZPR_4b16:
4459  return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4460  case Match_InvalidZPR_4b32:
4461  return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4462  case Match_InvalidZPR_4b64:
4463  return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4464  case Match_InvalidSVEPattern:
4465  return Error(Loc, "invalid predicate pattern");
4466  case Match_InvalidSVEPredicateAnyReg:
4467  case Match_InvalidSVEPredicateBReg:
4468  case Match_InvalidSVEPredicateHReg:
4469  case Match_InvalidSVEPredicateSReg:
4470  case Match_InvalidSVEPredicateDReg:
4471  return Error(Loc, "invalid predicate register.");
4472  case Match_InvalidSVEPredicate3bAnyReg:
4473  case Match_InvalidSVEPredicate3bBReg:
4474  case Match_InvalidSVEPredicate3bHReg:
4475  case Match_InvalidSVEPredicate3bSReg:
4476  case Match_InvalidSVEPredicate3bDReg:
4477  return Error(Loc, "restricted predicate has range [0, 7].");
4478  case Match_InvalidSVEExactFPImmOperandHalfOne:
4479  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4480  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4481  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4482  case Match_InvalidSVEExactFPImmOperandZeroOne:
4483  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4484  default:
4485  llvm_unreachable("unexpected error code!");
4486  }
4487 }
4488 
4489 static const char *getSubtargetFeatureName(uint64_t Val);
4490 
/// Match a fully-parsed instruction against the match tables and emit it.
///
/// Before matching, a number of assembler-level aliases that TableGen's
/// InstAlias machinery cannot express are rewritten in place on \p Operands
/// (lsl->ubfm, bfc->bfm, bfi/sbfiz/ubfiz->*bfm, bfxil/sbfx/ubfx->*bfm, the
/// Cyclone movi.2d #0 workaround, and sxt/uxt W<->X register twiddling).
/// Matching is then tried twice: once against the short-form NEON table
/// (variant 1), then against the long-form table (variant 0).
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

  // Rewrite "lsl Rd, Rn, #imm" into "ubfm Rd, Rn, #immr, #imms" where
  // immr = (-imm) mod regwidth and imms = regwidth - 1 - imm.
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isScalarReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        // Register class decides whether this is the 32- or 64-bit form.
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
                Op2.getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }

        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());

        Operands[0] = AArch64Operand::CreateToken(
            "ubfm", false, Op.getStartLoc(), getContext());
        Operands.push_back(AArch64Operand::CreateImm(
            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                                Op3.getEndLoc(), getContext());
      }
    }
  } else if (NumOperands == 4 && Tok == "bfc") {
    // FIXME: Horrible hack to handle BFC->BFM alias.
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    // LSBOp/WidthOp are deliberately copies, not references: Operands[2]/[3]
    // are overwritten below while these operands' locations are still needed.
    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);

    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());

      if (LSBCE && WidthCE) {
        uint64_t LSB = LSBCE->getValue();
        uint64_t Width = WidthCE->getValue();

        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                Op1.getReg()))
          RegWidth = 64;
        else
          RegWidth = 32;

        if (LSB >= RegWidth)
          return Error(LSBOp.getStartLoc(),
                       "expected integer in range [0, 31]");
        if (Width < 1 || Width > RegWidth)
          return Error(WidthOp.getStartLoc(),
                       "expected integer in range [1, 32]");

        // immr encodes the (negated) rotate amount modulo the register width.
        uint64_t ImmR = 0;
        if (RegWidth == 32)
          ImmR = (32 - LSB) & 0x1f;
        else
          ImmR = (64 - LSB) & 0x3f;

        uint64_t ImmS = Width - 1;

        if (ImmR != 0 && ImmS >= ImmR)
          return Error(WidthOp.getStartLoc(),
                       "requested insert overflows register");

        // BFC Rd, #lsb, #width  ->  BFM Rd, WZR/XZR, #immr, #imms
        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
        Operands[0] = AArch64Operand::CreateToken(
            "bfm", false, Op.getStartLoc(), getContext());
        Operands[2] = AArch64Operand::CreateReg(
            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
            SMLoc(), SMLoc(), getContext());
        Operands[3] = AArch64Operand::CreateImm(
            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
        Operands.emplace_back(
            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
                                      WidthOp.getEndLoc(), getContext()));
      }
    }
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          // Convert lsb/width to the *BFM immr/imms encoding.
          uint64_t NewOp3Val = 0;
          if (RegWidth == 32)
            NewOp3Val = (32 - Op3Val) & 0x1f;
          else
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfi")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }

      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
      // UBFX -> UBFM aliases.
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          // For the extract forms, imms is simply lsb + width - 1.
          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4.getStartLoc(),
                         "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::create(NewOp4Val, getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfxil")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }
    }
  }

  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
  // instruction for FP registers correctly in some rare circumstances. Convert
  // it to a safe instruction and warn (because silently changing someone's
  // assembly is rude).
  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
      NumOperands == 4 && Tok == "movi") {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    // Both operand orders occur: "movi.2d vN, #0" and "movi vN.2d, #0".
    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
      if (Suffix.lower() == ".2d" &&
          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
                " correctly on this CPU, converting to equivalent movi.16b");
        // Switch the suffix to .16b.
        unsigned Idx = Op1.isToken() ? 1 : 2;
        Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
                                                    getContext());
      }
    }
  }

  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  // InstAlias can't quite handle this since the reg classes aren't
  // subclasses.
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
    if (Op.isScalarReg()) {
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                              Op.getStartLoc(), Op.getEndLoc(),
                                              getContext());
    }
  }
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      // Note: this inner Op intentionally shadows the outer one.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      if (Op.isScalarReg()) {
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      // Unlike the sxt[bh] case above, this rewrites the *destination*
      // operand (Operands[1]) down to its W form.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
      if (Op.isScalarReg()) {
        unsigned Reg = getWRegFromXReg(Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }

  MCInst Inst;
  FeatureBitset MissingFeatures;
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                           MatchingInlineAsm, 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success) {
    // But first, save the short-form match result: we can use it in case the
    // long-form match also fails.
    auto ShortFormNEONErrorInfo = ErrorInfo;
    auto ShortFormNEONMatchResult = MatchResult;
    auto ShortFormNEONMissingFeatures = MissingFeatures;

    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                             MatchingInlineAsm, 0);

    // Now, both matches failed, and the long-form match failed on the mnemonic
    // suffix token operand. The short-form match failure is probably more
    // relevant: use it instead.
    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
      MatchResult = ShortFormNEONMatchResult;
      ErrorInfo = ShortFormNEONErrorInfo;
      MissingFeatures = ShortFormNEONMissingFeatures;
    }
  }

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, IDLoc, OperandLocs))
      return true;

    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  }
  case Match_MissingFeature: {
    assert(MissingFeatures.any() && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
      if (MissingFeatures[i]) {
        Msg += " ";
        Msg += getSubtargetFeatureName(i);
      }
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;

    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction",
                     SMRange(IDLoc, getTok().getLoc()));

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    // If the match failed on a suffix token operand, tweak the diagnostic
    // accordingly.
    // NOTE(review): this indexes Operands[ErrorInfo] outside the
    // "ErrorInfo != ~0ULL" guard above; if the matcher ever reports
    // Match_InvalidOperand with ErrorInfo == ~0ULL this is out-of-bounds —
    // confirm the matcher's contract upstream.
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;

    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  // All of the specific diagnostics below share one code path: point at the
  // failing operand and let showMatchError produce the detailed message.
  case Match_InvalidTiedOperand:
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed1SImm4:
  case Match_InvalidMemoryIndexed2SImm4:
  case Match_InvalidMemoryIndexed3SImm4:
  case Match_InvalidMemoryIndexed4SImm4:
  case Match_InvalidMemoryIndexed1SImm6:
  case Match_InvalidMemoryIndexed16SImm4:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexed8UImm5:
  case Match_InvalidMemoryIndexed4UImm5:
  case Match_InvalidMemoryIndexed2UImm5:
  case Match_InvalidMemoryIndexed1UImm6:
  case Match_InvalidMemoryIndexed2UImm6:
  case Match_InvalidMemoryIndexed4UImm6:
  case Match_InvalidMemoryIndexed8UImm6:
  case Match_InvalidMemoryIndexed16UImm6:
  case Match_InvalidMemoryIndexedSImm6:
  case Match_InvalidMemoryIndexedSImm5:
  case Match_InvalidMemoryIndexedSImm8:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidMemoryIndexed16SImm9:
  case Match_InvalidMemoryIndexed8SImm10:
  case Match_InvalidImm0_1:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_255:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidSVEAddSubImm8:
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
  case Match_InvalidSVECpyImm8:
  case Match_InvalidSVECpyImm16:
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
  case Match_InvalidIndexRange1_1:
  case Match_InvalidIndexRange0_15:
  case Match_InvalidIndexRange0_7:
  case Match_InvalidIndexRange0_3:
  case Match_InvalidIndexRange0_1:
  case Match_InvalidSVEIndexRange0_63:
  case Match_InvalidSVEIndexRange0_31:
  case Match_InvalidSVEIndexRange0_15:
  case Match_InvalidSVEIndexRange0_7:
  case Match_InvalidSVEIndexRange0_3:
  case Match_InvalidLabel:
  case Match_InvalidComplexRotationEven:
  case Match_InvalidComplexRotationOdd:
  case Match_InvalidGPR64shifted8:
  case Match_InvalidGPR64shifted16:
  case Match_InvalidGPR64shifted32:
  case Match_InvalidGPR64shifted64:
  case Match_InvalidGPR64NoXZRshifted8:
  case Match_InvalidGPR64NoXZRshifted16:
  case Match_InvalidGPR64NoXZRshifted32:
  case Match_InvalidGPR64NoXZRshifted64:
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW8:
  case Match_InvalidZPR32SXTW16:
  case Match_InvalidZPR32SXTW32:
  case Match_InvalidZPR32SXTW64:
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
  case Match_InvalidZPR32LSL8:
  case Match_InvalidZPR32LSL16:
  case Match_InvalidZPR32LSL32:
  case Match_InvalidZPR32LSL64:
  case Match_InvalidZPR64LSL8:
  case Match_InvalidZPR64LSL16:
  case Match_InvalidZPR64LSL32:
  case Match_InvalidZPR64LSL64:
  case Match_InvalidZPR0:
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
  case Match_InvalidZPR_3b8:
  case Match_InvalidZPR_3b16:
  case Match_InvalidZPR_3b32:
  case Match_InvalidZPR_4b16:
  case Match_InvalidZPR_4b32:
  case Match_InvalidZPR_4b64:
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPattern:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEPredicate3bBReg:
  case Match_InvalidSVEPredicate3bHReg:
  case Match_InvalidSVEPredicate3bSReg:
  case Match_InvalidSVEPredicate3bDReg:
  case Match_InvalidSVEExactFPImmOperandHalfOne:
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
  case Match_InvalidSVEExactFPImmOperandZeroOne:
  case Match_MSR:
  case Match_MRS: {
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
5016 
5017 /// ParseDirective parses the arm specific directives
5018 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5020  getContext().getObjectFileInfo()->getObjectFileType();
5021  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5022 
5023  StringRef IDVal = DirectiveID.getIdentifier();
5024  SMLoc Loc = DirectiveID.getLoc();
5025  if (IDVal == ".arch")
5026  parseDirectiveArch(Loc);
5027  else if (IDVal == ".cpu")
5028  parseDirectiveCPU(Loc);
5029  else if (IDVal == ".tlsdesccall")
5030  parseDirectiveTLSDescCall(Loc);
5031  else if (IDVal == ".ltorg" || IDVal == ".pool")
5032  parseDirectiveLtorg(Loc);
5033  else if (IDVal == ".unreq")
5034  parseDirectiveUnreq(Loc);
5035  else if (IDVal == ".inst")
5036  parseDirectiveInst(Loc);
5037  else if (IDVal == ".cfi_negate_ra_state")
5038  parseDirectiveCFINegateRAState();
5039  else if (IDVal == ".cfi_b_key_frame")
5040  parseDirectiveCFIBKeyFrame();
5041  else if (IDVal == ".arch_extension")
5042  parseDirectiveArchExtension(Loc);
5043  else if (IsMachO) {
5044  if (IDVal == MCLOHDirectiveName())
5045  parseDirectiveLOH(IDVal, Loc);
5046  else
5047  return true;
5048  } else
5049  return true;
5050  return false;
5051 }
5052 
5054  SmallVector<StringRef, 4> &RequestedExtensions) {
5055  const bool NoCrypto =
5056  (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5057  "nocrypto") != std::end(RequestedExtensions));
5058  const bool Crypto =
5059  (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5060  "crypto") != std::end(RequestedExtensions));
5061 
5062  if (!NoCrypto && Crypto) {
5063  switch (ArchKind) {
5064  default:
5065  // Map 'generic' (and others) to sha2 and aes, because
5066  // that was the traditional meaning of crypto.
5067  case AArch64::ArchKind::ARMV8_1A:
5068  case AArch64::ArchKind::ARMV8_2A:
5069  case AArch64::ArchKind::ARMV8_3A:
5070  RequestedExtensions.push_back("sha2");
5071  RequestedExtensions.push_back("aes");
5072  break;
5073  case AArch64::ArchKind::ARMV8_4A:
5074  case AArch64::ArchKind::ARMV8_5A:
5075  RequestedExtensions.push_back("sm4");
5076  RequestedExtensions.push_back("sha3");
5077  RequestedExtensions.push_back("sha2");
5078  RequestedExtensions.push_back("aes");
5079  break;
5080  }
5081  } else if (NoCrypto) {
5082  switch (ArchKind) {
5083  default:
5084  // Map 'generic' (and others) to sha2 and aes, because
5085  // that was the traditional meaning of crypto.
5086  case AArch64::ArchKind::ARMV8_1A:
5087  case AArch64::ArchKind::ARMV8_2A:
5088  case AArch64::ArchKind::ARMV8_3A:
5089  RequestedExtensions.push_back("nosha2");
5090  RequestedExtensions.push_back("noaes");
5091  break;
5092  case AArch64::ArchKind::ARMV8_4A:
5093  case AArch64::ArchKind::ARMV8_5A:
5094  RequestedExtensions.push_back("nosm4");
5095  RequestedExtensions.push_back("nosha3");
5096  RequestedExtensions.push_back("nosha2");
5097  RequestedExtensions.push_back("noaes");
5098  break;
5099  }
5100  }
5101 }
5102 
5103 /// parseDirectiveArch
5104 /// ::= .arch token
5105 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5106  SMLoc ArchLoc = getLoc();
5107 
5108  StringRef Arch, ExtensionString;
5109  std::tie(Arch, ExtensionString) =
5110  getParser().parseStringToEndOfStatement().trim().split('+');
5111 
5113  if (ID == AArch64::ArchKind::INVALID)
5114  return Error(ArchLoc, "unknown arch name");
5115 
5116  if (parseToken(AsmToken::EndOfStatement))
5117  return true;
5118 
5119  // Get the architecture and extension features.
5120  std::vector<StringRef> AArch64Features;
5121  AArch64::getArchFeatures(ID, AArch64Features);
5123  AArch64Features);
5124 
5125  MCSubtargetInfo &STI = copySTI();
5126  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5127  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5128 
5129  SmallVector<StringRef, 4> RequestedExtensions;
5130  if (!ExtensionString.empty())
5131  ExtensionString.split(RequestedExtensions, '+');
5132 
5133  ExpandCryptoAEK(ID, RequestedExtensions);
5134 
5135  FeatureBitset Features = STI.getFeatureBits();
5136  for (auto Name : RequestedExtensions) {
5137  bool EnableFeature = true;
5138 
5139  if (Name.startswith_lower("no")) {
5140  EnableFeature = false;
5141  Name = Name.substr(2);
5142  }
5143 
5144  for (const auto &Extension : ExtensionMap) {
5145  if (Extension.Name != Name)
5146  continue;
5147 
5148  if (Extension.Features.none())
5149  report_fatal_error("unsupported architectural extension: " + Name);
5150 
5151  FeatureBitset ToggleFeatures = EnableFeature
5152  ? (~Features & Extension.Features)
5153  : ( Features & Extension.Features);
5154  FeatureBitset Features =
5155  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5156  setAvailableFeatures(Features);
5157  break;
5158  }
5159  }
5160  return false;
5161 }
5162 
5163 /// parseDirectiveArchExtension
5164 /// ::= .arch_extension [no]feature
5165 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5166  SMLoc ExtLoc = getLoc();
5167 
5168  StringRef Name = getParser().parseStringToEndOfStatement().trim();
5169 
5170  if (parseToken(AsmToken::EndOfStatement,
5171  "unexpected token in '.arch_extension' directive"))
5172  return true;
5173 
5174  bool EnableFeature = true;
5175  if (Name.startswith_lower("no")) {
5176  EnableFeature = false;
5177  Name = Name.substr(2);
5178  }
5179 
5180  MCSubtargetInfo &STI = copySTI();
5181  FeatureBitset Features = STI.getFeatureBits();
5182  for (const auto &Extension : ExtensionMap) {
5183  if (Extension.Name != Name)
5184  continue;
5185 
5186  if (Extension.Features.none())
5187  return Error(ExtLoc, "unsupported architectural extension: " + Name);
5188 
5189  FeatureBitset ToggleFeatures = EnableFeature
5190  ? (~Features & Extension.Features)
5191  : (Features & Extension.Features);
5192  FeatureBitset Features =
5193  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5194  setAvailableFeatures(Features);
5195  return false;
5196  }
5197 
5198  return Error(ExtLoc, "unknown architectural extension: " + Name);
5199 }
5200 
5201 static SMLoc incrementLoc(SMLoc L, int Offset) {
5202  return SMLoc::getFromPointer(L.getPointer() + Offset);
5203 }
5204 
5205 /// parseDirectiveCPU
5206 /// ::= .cpu id
5207 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5208  SMLoc CurLoc = getLoc();
5209 
5210  StringRef CPU, ExtensionString;
5211  std::tie(CPU, ExtensionString) =
5212  getParser().parseStringToEndOfStatement().trim().split('+');
5213 
5214  if (parseToken(AsmToken::EndOfStatement))
5215  return true;
5216 
5217  SmallVector<StringRef, 4> RequestedExtensions;
5218  if (!ExtensionString.empty())
5219  ExtensionString.split(RequestedExtensions, '+');
5220 
5221  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5222  // once that is tablegen'ed
5223  if (!getSTI().isCPUStringValid(CPU)) {
5224  Error(CurLoc, "unknown CPU name");
5225  return false;
5226  }
5227 
5228  MCSubtargetInfo &STI = copySTI();
5229  STI.setDefaultFeatures(CPU, "");
5230  CurLoc = incrementLoc(CurLoc, CPU.size());
5231 
5232  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5233 
5234  FeatureBitset Features = STI.getFeatureBits();
5235  for (auto Name : RequestedExtensions) {
5236  // Advance source location past '+'.
5237  CurLoc = incrementLoc(CurLoc, 1);
5238 
5239  bool EnableFeature = true;
5240 
5241  if (Name.startswith_lower("no")) {
5242  EnableFeature = false;
5243  Name = Name.substr(2);
5244  }
5245 
5246  bool FoundExtension = false;
5247  for (const auto &Extension : ExtensionMap) {
5248  if (Extension.Name != Name)
5249  continue;
5250 
5251  if (Extension.Features.none())
5252  report_fatal_error("unsupported architectural extension: " + Name);
5253 
5254  FeatureBitset ToggleFeatures = EnableFeature
5255  ? (~Features & Extension.Features)
5256  : ( Features & Extension.Features);
5257  FeatureBitset Features =
5258  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5259  setAvailableFeatures(Features);
5260  FoundExtension = true;
5261 
5262  break;
5263  }
5264 
5265  if (!FoundExtension)
5266  Error(CurLoc, "unsupported architectural extension");
5267 
5268  CurLoc = incrementLoc(CurLoc, Name.size());
5269  }
5270  return false;
5271 }
5272 
5273 /// parseDirectiveInst
5274 /// ::= .inst opcode [, ...]
5275 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5276  if (getLexer().is(AsmToken::EndOfStatement))
5277  return Error(Loc, "expected expression following '.inst' directive");
5278 
5279  auto parseOp = [&]() -> bool {
5280  SMLoc L = getLoc();
5281  const MCExpr *Expr;
5282  if (check(getParser().parseExpression(Expr), L, "expected expression"))
5283  return true;
5284  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5285  if (check(!Value, L, "expected constant expression"))
5286  return true;
5287  getTargetStreamer().emitInst(Value->getValue());
5288  return false;
5289  };
5290 
5291  if (parseMany(parseOp))
5292  return addErrorSuffix(" in '.inst' directive");
5293  return false;
5294 }
5295 
5296 // parseDirectiveTLSDescCall:
5297 // ::= .tlsdesccall symbol
5298 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5299  StringRef Name;
5300  if (check(getParser().parseIdentifier(Name), L,
5301  "expected symbol after directive") ||
5302  parseToken(AsmToken::EndOfStatement))
5303  return true;
5304 
5305  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5306  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5307  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5308 
5309  MCInst Inst;
5310  Inst.setOpcode(AArch64::TLSDESCCALL);
5311  Inst.addOperand(MCOperand::createExpr(Expr));
5312 
5313  getParser().getStreamer().EmitInstruction(Inst, getSTI());
5314  return false;
5315 }
5316 
5317 /// ::= .loh <lohName | lohId> label1, ..., labelN
5318 /// The number of arguments depends on the loh identifier.
5319 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5320  MCLOHType Kind;
5321  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5322  if (getParser().getTok().isNot(AsmToken::Integer))
5323  return TokError("expected an identifier or a number in directive");
5324  // We successfully get a numeric value for the identifier.
5325  // Check if it is valid.
5326  int64_t Id = getParser().getTok().getIntVal();
5327  if (Id <= -1U && !isValidMCLOHType(Id))
5328  return TokError("invalid numeric identifier in directive");
5329  Kind = (MCLOHType)Id;
5330  } else {
5331  StringRef Name = getTok().getIdentifier();
5332  // We successfully parse an identifier.
5333  // Check if it is a recognized one.
5334  int Id = MCLOHNameToId(Name);
5335 
5336  if (Id == -1)
5337  return TokError("invalid identifier in directive");
5338  Kind = (MCLOHType)Id;
5339  }
5340  // Consume the identifier.
5341  Lex();
5342  // Get the number of arguments of this LOH.
5343  int NbArgs = MCLOHIdToNbArgs(Kind);
5344 
5345  assert(NbArgs != -1 && "Invalid number of arguments");
5346 
5348  for (int Idx = 0; Idx < NbArgs; ++Idx) {
5349  StringRef Name;
5350  if (getParser().parseIdentifier(Name))
5351  return TokError("expected identifier in directive");
5352  Args.push_back(getContext().getOrCreateSymbol(Name));
5353 
5354  if (Idx + 1 == NbArgs)
5355  break;
5356  if (parseToken(AsmToken::Comma,
5357  "unexpected token in '" + Twine(IDVal) + "' directive"))
5358  return true;
5359  }
5360  if (parseToken(AsmToken::EndOfStatement,
5361  "unexpected token in '" + Twine(IDVal) + "' directive"))
5362  return true;
5363 
5364  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5365  return false;
5366 }
5367 
5368 /// parseDirectiveLtorg
5369 /// ::= .ltorg | .pool
5370 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5371  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5372  return true;
5373  getTargetStreamer().emitCurrentConstantPool();
5374  return false;
5375 }
5376 
5377 /// parseDirectiveReq
5378 /// ::= name .req registername
5379 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5380  MCAsmParser &Parser = getParser();
5381  Parser.Lex(); // Eat the '.req' token.
5382  SMLoc SRegLoc = getLoc();
5384  unsigned RegNum;
5385  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5386 
5387  if (ParseRes != MatchOperand_Success) {
5388  StringRef Kind;
5389  RegisterKind = RegKind::NeonVector;
5390  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5391 
5392  if (ParseRes == MatchOperand_ParseFail)
5393  return true;
5394 
5395  if (ParseRes == MatchOperand_Success && !Kind.empty())
5396  return Error(SRegLoc, "vector register without type specifier expected");
5397  }
5398 
5399  if (ParseRes != MatchOperand_Success) {
5400  StringRef Kind;
5401  RegisterKind = RegKind::SVEDataVector;
5402  ParseRes =
5403  tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5404 
5405  if (ParseRes == MatchOperand_ParseFail)
5406  return true;
5407 
5408  if (ParseRes == MatchOperand_Success && !Kind.empty())
5409  return Error(SRegLoc,
5410  "sve vector register without type specifier expected");
5411  }
5412 
5413  if (ParseRes != MatchOperand_Success) {
5414  StringRef Kind;
5415  RegisterKind = RegKind::SVEPredicateVector;
5416  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5417 
5418  if (ParseRes == MatchOperand_ParseFail)
5419  return true;
5420