LLVM  9.0.0svn
AArch64AsmParser.cpp
Go to the documentation of this file.
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
14 #include "AArch64InstrInfo.h"
15 #include "Utils/AArch64BaseInfo.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/MC/MCContext.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/MC/MCStreamer.h"
39 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCValue.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/SMLoc.h"
51 #include <cassert>
52 #include <cctype>
53 #include <cstdint>
54 #include <cstdio>
55 #include <string>
56 #include <tuple>
57 #include <utility>
58 #include <vector>
59 
60 using namespace llvm;
61 
62 namespace {
63 
// Classifies how a parsed register name is to be interpreted by the
// operand-matching code: plain scalar, NEON vector, or one of the two
// SVE register files.
64 enum class RegKind {
65  Scalar,              // general-purpose / FP scalar register
66  NeonVector,          // NEON (AdvSIMD) vector register
67  SVEDataVector,       // SVE Z data-vector register
68  SVEPredicateVector   // SVE P predicate register
69 };
70 
// NOTE(review): the enum's opening line (original line 71, presumably
// "enum RegConstraintEqualityTy {") is missing from this capture — confirm
// against the upstream file. The enumerators describe how a matched
// register may relate to the register class the operand expects.
72  EqualsReg,      // operand register must be exactly the written register
73  EqualsSuperReg, // operand uses the super-register of the written one
74  EqualsSubReg    // operand uses the sub-register of the written one
75 };
76 
// Target assembly parser for AArch64: turns assembly tokens into
// AArch64Operand lists, matches them against the auto-generated matcher
// tables, and handles AArch64-specific directives (.req, .arch, .cpu, LOH,
// CFI extensions, ...).
77 class AArch64AsmParser : public MCTargetAsmParser {
78 private:
79  StringRef Mnemonic; ///< Instruction mnemonic.
80 
81  // Map of register aliases registers via the .req directive.
// NOTE(review): the member declaration for that map (original line 82)
// is missing from this capture — verify against the upstream file.
83 
// Tracks a pending MOVPRFX instruction so the following instruction can be
// validated against it (destination register, predication, element size).
84  class PrefixInfo {
85  public:
// Build a PrefixInfo from an already-parsed MCInst; non-MOVPRFX opcodes
// yield an inactive prefix.
86  static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
87  PrefixInfo Prefix;
88  switch (Inst.getOpcode()) {
89  case AArch64::MOVPRFX_ZZ:
// Unpredicated form: only the destination register is recorded.
90  Prefix.Active = true;
91  Prefix.Dst = Inst.getOperand(0).getReg();
92  break;
93  case AArch64::MOVPRFX_ZPmZ_B:
94  case AArch64::MOVPRFX_ZPmZ_H:
95  case AArch64::MOVPRFX_ZPmZ_S:
96  case AArch64::MOVPRFX_ZPmZ_D:
// Merging predicated form: governing predicate is operand 2.
97  Prefix.Active = true;
98  Prefix.Predicated = true;
99  Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
100  assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
101  "No destructive element size set for movprfx");
102  Prefix.Dst = Inst.getOperand(0).getReg();
103  Prefix.Pg = Inst.getOperand(2).getReg();
104  break;
105  case AArch64::MOVPRFX_ZPzZ_B:
106  case AArch64::MOVPRFX_ZPzZ_H:
107  case AArch64::MOVPRFX_ZPzZ_S:
108  case AArch64::MOVPRFX_ZPzZ_D:
// Zeroing predicated form: governing predicate is operand 1.
109  Prefix.Active = true;
110  Prefix.Predicated = true;
111  Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
112  assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
113  "No destructive element size set for movprfx");
114  Prefix.Dst = Inst.getOperand(0).getReg();
115  Prefix.Pg = Inst.getOperand(1).getReg();
116  break;
117  default:
118  break;
119  }
120 
121  return Prefix;
122  }
123 
124  PrefixInfo() : Active(false), Predicated(false) {}
125  bool isActive() const { return Active; }
126  bool isPredicated() const { return Predicated; }
// ElementSize/Pg are only meaningful for the predicated forms, hence the
// asserts in the accessors below.
127  unsigned getElementSize() const {
128  assert(Predicated);
129  return ElementSize;
130  }
131  unsigned getDstReg() const { return Dst; }
132  unsigned getPgReg() const {
133  assert(Predicated);
134  return Pg;
135  }
136 
137  private:
138  bool Active;
139  bool Predicated;
140  unsigned ElementSize;
141  unsigned Dst;
142  unsigned Pg;
143  } NextPrefix;
144 
145  AArch64TargetStreamer &getTargetStreamer() {
146  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
147  return static_cast<AArch64TargetStreamer &>(TS);
148  }
149 
// Location of the token currently under the lexer cursor.
150  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
151 
152  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
153  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
154  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
155  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
156  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
157  bool parseRegister(OperandVector &Operands);
158  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
159  bool parseNeonVectorList(OperandVector &Operands);
160  bool parseOptionalMulOperand(OperandVector &Operands);
161  bool parseOperand(OperandVector &Operands, bool isCondCode,
162  bool invertCondCode);
163 
164  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
165  OperandVector &Operands);
166 
// Handlers for AArch64-specific assembler directives.
167  bool parseDirectiveArch(SMLoc L);
168  bool parseDirectiveArchExtension(SMLoc L);
169  bool parseDirectiveCPU(SMLoc L);
170  bool parseDirectiveInst(SMLoc L);
171 
172  bool parseDirectiveTLSDescCall(SMLoc L);
173 
174  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
175  bool parseDirectiveLtorg(SMLoc L);
176 
177  bool parseDirectiveReq(StringRef Name, SMLoc L);
178  bool parseDirectiveUnreq(SMLoc L);
179  bool parseDirectiveCFINegateRAState();
180  bool parseDirectiveCFIBKeyFrame();
181 
// NOTE(review): the continuation of this declaration (original line 183)
// is missing from this capture — confirm the full parameter list upstream.
182  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
184  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
185  OperandVector &Operands, MCStreamer &Out,
186  uint64_t &ErrorInfo,
187  bool MatchingInlineAsm) override;
188 /// @name Auto-generated Match Functions
189 /// {
190 
191 #define GET_ASSEMBLER_HEADER
192 #include "AArch64GenAsmMatcher.inc"
193 
194  /// }
195 
// Custom operand parsers referenced from the generated matcher tables.
196  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
197  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
198  RegKind MatchKind);
199  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
200  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
201  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
202  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
203  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
204  template <bool IsSVEPrefetch = false>
205  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
206  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
207  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
208  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
209  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
210  template<bool AddFPZeroAsLiteral>
211  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
212  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
213  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
214  bool tryParseNeonVectorRegister(OperandVector &Operands);
215  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
216  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
217  template <bool ParseShiftExtend,
218  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
219  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
220  template <bool ParseShiftExtend, bool ParseSuffix>
221  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
222  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
223  template <RegKind VectorKind>
224  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
225  bool ExpectMatch = false);
226  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
227 
228 public:
229  enum AArch64MatchResultTy {
230  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
231 #define GET_OPERAND_DIAGNOSTIC_TYPES
232 #include "AArch64GenAsmMatcher.inc"
233  };
// True when targeting the ILP32 ABI (selected via -target-abi=ilp32).
234  bool IsILP32;
235 
236  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
237  const MCInstrInfo &MII, const MCTargetOptions &Options)
238  : MCTargetAsmParser(Options, STI, MII) {
239  IsILP32 = Options.getABIName() == "ilp32";
// NOTE(review): original line 240 is missing from this capture.
// Ensure a target streamer exists; the streamer owns the new object.
241  MCStreamer &S = getParser().getStreamer();
242  if (S.getTargetStreamer() == nullptr)
243  new AArch64TargetStreamer(S);
244 
245  // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
246  // directives as they have the same form and semantics:
247  /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
248  Parser.addAliasForDirective(".hword", ".2byte");
249  Parser.addAliasForDirective(".word", ".4byte");
250  Parser.addAliasForDirective(".xword", ".8byte");
251 
252  // Initialize the set of available features.
253  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
254  }
255 
// MCTargetAsmParser overrides implemented later in this file.
256  bool regsEqual(const MCParsedAsmOperand &Op1,
257  const MCParsedAsmOperand &Op2) const override;
258  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
259  SMLoc NameLoc, OperandVector &Operands) override;
260  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
261  bool ParseDirective(AsmToken DirectiveID) override;
262  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
263  unsigned Kind) override;
264 
// Decompose a symbolic expression into its AArch64/Darwin modifier kind and
// constant addend; returns false if the expression is not understood.
265  static bool classifySymbolRef(const MCExpr *Expr,
266  AArch64MCExpr::VariantKind &ELFRefKind,
267  MCSymbolRefExpr::VariantKind &DarwinRefKind,
268  int64_t &Addend);
269 };
270 
271 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
272 /// instruction.
273 class AArch64Operand : public MCParsedAsmOperand {
274 private:
// Discriminator for the anonymous union below; exactly one union member is
// valid at a time, selected by Kind.
275  enum KindTy {
276  k_Immediate,
277  k_ShiftedImm,
278  k_CondCode,
279  k_Register,
280  k_VectorList,
281  k_VectorIndex,
282  k_Token,
283  k_SysReg,
284  k_SysCR,
285  k_Prefetch,
286  k_ShiftExtend,
287  k_FPImm,
288  k_Barrier,
289  k_PSBHint,
290  k_BTIHint,
291  } Kind;
292 
293  SMLoc StartLoc, EndLoc;
294 
295  struct TokOp {
296  const char *Data;
297  unsigned Length;
298  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
299  };
300 
301  // Separate shift/extend operand.
302  struct ShiftExtendOp {
// NOTE(review): original line 303 (presumably the shift/extend "Type"
// field — it is read as ShiftExtend.Type elsewhere in this file) is
// missing from this capture; confirm upstream.
304  unsigned Amount;
305  bool HasExplicitAmount;
306  };
307 
308  struct RegOp {
309  unsigned RegNum;
310  RegKind Kind;
311  int ElementWidth;
312 
313  // The register may be allowed as a different register class,
314  // e.g. for GPR64as32 or GPR32as64.
315  RegConstraintEqualityTy EqualityTy;
316 
317  // In some cases the shift/extend needs to be explicitly parsed together
318  // with the register, rather than as a separate operand. This is needed
319  // for addressing modes where the instruction as a whole dictates the
320  // scaling/extend, rather than specific bits in the instruction.
321  // By parsing them as a single operand, we avoid the need to pass an
322  // extra operand in all CodeGen patterns (because all operands need to
323  // have an associated value), and we avoid the need to update TableGen to
324  // accept operands that have no associated bits in the instruction.
325  //
326  // An added benefit of parsing them together is that the assembler
327  // can give a sensible diagnostic if the scaling is not correct.
328  //
329  // The default is 'lsl #0' (HasExplicitAmount = false) if no
330  // ShiftExtend is specified.
331  ShiftExtendOp ShiftExtend;
332  };
333 
334  struct VectorListOp {
335  unsigned RegNum;
336  unsigned Count;
337  unsigned NumElements;
338  unsigned ElementWidth;
// NOTE(review): original line 339 is missing from this capture (likely a
// RegKind field for the list's register kind); confirm upstream.
340  };
341 
342  struct VectorIndexOp {
343  unsigned Val;
344  };
345 
346  struct ImmOp {
347  const MCExpr *Val;
348  };
349 
350  struct ShiftedImmOp {
351  const MCExpr *Val;
352  unsigned ShiftAmount;
353  };
354 
355  struct CondCodeOp {
356  AArch64CC::CondCode Code;
357  };
358 
359  struct FPImmOp {
360  uint64_t Val; // APFloat value bitcasted to uint64_t.
361  bool IsExact; // describes whether parsed value was exact.
362  };
363 
364  struct BarrierOp {
365  const char *Data;
366  unsigned Length;
367  unsigned Val; // Not the enum since not all values have names.
368  };
369 
370  struct SysRegOp {
371  const char *Data;
372  unsigned Length;
373  uint32_t MRSReg;
374  uint32_t MSRReg;
375  uint32_t PStateField;
376  };
377 
378  struct SysCRImmOp {
379  unsigned Val;
380  };
381 
382  struct PrefetchOp {
383  const char *Data;
384  unsigned Length;
385  unsigned Val;
386  };
387 
388  struct PSBHintOp {
389  const char *Data;
390  unsigned Length;
391  unsigned Val;
392  };
393 
394  struct BTIHintOp {
395  const char *Data;
396  unsigned Length;
397  unsigned Val;
398  };
399 
400  struct ExtendOp {
401  unsigned Val;
402  };
403 
// Payload storage; the member selected by Kind is the only valid one.
404  union {
405  struct TokOp Tok;
406  struct RegOp Reg;
407  struct VectorListOp VectorList;
408  struct VectorIndexOp VectorIndex;
409  struct ImmOp Imm;
410  struct ShiftedImmOp ShiftedImm;
411  struct CondCodeOp CondCode;
412  struct FPImmOp FPImm;
413  struct BarrierOp Barrier;
414  struct SysRegOp SysReg;
415  struct SysCRImmOp SysCRImm;
416  struct PrefetchOp Prefetch;
417  struct PSBHintOp PSBHint;
418  struct BTIHintOp BTIHint;
419  struct ShiftExtendOp ShiftExtend;
420  };
421 
422  // Keep the MCContext around as the MCExprs may need manipulated during
423  // the add<>Operands() calls.
424  MCContext &Ctx;
425 
426 public:
// Construct an operand of kind K; the corresponding union member must be
// filled in by the Create* factory that follows.
427  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
428 
// Copy constructor: copies only the union member that Kind says is active.
429  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
430  Kind = o.Kind;
431  StartLoc = o.StartLoc;
432  EndLoc = o.EndLoc;
433  switch (Kind) {
434  case k_Token:
435  Tok = o.Tok;
436  break;
437  case k_Immediate:
438  Imm = o.Imm;
439  break;
440  case k_ShiftedImm:
441  ShiftedImm = o.ShiftedImm;
442  break;
443  case k_CondCode:
444  CondCode = o.CondCode;
445  break;
446  case k_FPImm:
447  FPImm = o.FPImm;
448  break;
449  case k_Barrier:
450  Barrier = o.Barrier;
451  break;
452  case k_Register:
453  Reg = o.Reg;
454  break;
455  case k_VectorList:
456  VectorList = o.VectorList;
457  break;
458  case k_VectorIndex:
459  VectorIndex = o.VectorIndex;
460  break;
461  case k_SysReg:
462  SysReg = o.SysReg;
463  break;
464  case k_SysCR:
465  SysCRImm = o.SysCRImm;
466  break;
467  case k_Prefetch:
468  Prefetch = o.Prefetch;
469  break;
470  case k_PSBHint:
471  PSBHint = o.PSBHint;
472  break;
473  case k_BTIHint:
474  BTIHint = o.BTIHint;
475  break;
476  case k_ShiftExtend:
477  ShiftExtend = o.ShiftExtend;
478  break;
479  }
480  }
481 
// Kind-checked accessors for each union payload. Each asserts that the
// operand actually holds the requested kind before reading the union.
482  /// getStartLoc - Get the location of the first token of this operand.
483  SMLoc getStartLoc() const override { return StartLoc; }
484  /// getEndLoc - Get the location of the last token of this operand.
485  SMLoc getEndLoc() const override { return EndLoc; }
486 
487  StringRef getToken() const {
488  assert(Kind == k_Token && "Invalid access!");
489  return StringRef(Tok.Data, Tok.Length);
490  }
491 
492  bool isTokenSuffix() const {
493  assert(Kind == k_Token && "Invalid access!");
494  return Tok.IsSuffix;
495  }
496 
497  const MCExpr *getImm() const {
498  assert(Kind == k_Immediate && "Invalid access!");
499  return Imm.Val;
500  }
501 
502  const MCExpr *getShiftedImmVal() const {
503  assert(Kind == k_ShiftedImm && "Invalid access!");
504  return ShiftedImm.Val;
505  }
506 
507  unsigned getShiftedImmShift() const {
508  assert(Kind == k_ShiftedImm && "Invalid access!");
509  return ShiftedImm.ShiftAmount;
510  }
511 
// NOTE(review): original line 512 (the signature of this accessor,
// presumably "AArch64CC::CondCode getCondCode() const {") is missing from
// this capture; confirm upstream.
513  assert(Kind == k_CondCode && "Invalid access!");
514  return CondCode.Code;
515  }
516 
// Reconstructs the APFloat from the raw 64 bits stored in FPImm.Val.
517  APFloat getFPImm() const {
518  assert (Kind == k_FPImm && "Invalid access!");
519  return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
520  }
521 
522  bool getFPImmIsExact() const {
523  assert (Kind == k_FPImm && "Invalid access!");
524  return FPImm.IsExact;
525  }
526 
527  unsigned getBarrier() const {
528  assert(Kind == k_Barrier && "Invalid access!");
529  return Barrier.Val;
530  }
531 
532  StringRef getBarrierName() const {
533  assert(Kind == k_Barrier && "Invalid access!");
534  return StringRef(Barrier.Data, Barrier.Length);
535  }
536 
537  unsigned getReg() const override {
538  assert(Kind == k_Register && "Invalid access!");
539  return Reg.RegNum;
540  }
541 
542  RegConstraintEqualityTy getRegEqualityTy() const {
543  assert(Kind == k_Register && "Invalid access!");
544  return Reg.EqualityTy;
545  }
546 
547  unsigned getVectorListStart() const {
548  assert(Kind == k_VectorList && "Invalid access!");
549  return VectorList.RegNum;
550  }
551 
552  unsigned getVectorListCount() const {
553  assert(Kind == k_VectorList && "Invalid access!");
554  return VectorList.Count;
555  }
556 
557  unsigned getVectorIndex() const {
558  assert(Kind == k_VectorIndex && "Invalid access!");
559  return VectorIndex.Val;
560  }
561 
562  StringRef getSysReg() const {
563  assert(Kind == k_SysReg && "Invalid access!");
564  return StringRef(SysReg.Data, SysReg.Length);
565  }
566 
567  unsigned getSysCR() const {
568  assert(Kind == k_SysCR && "Invalid access!");
569  return SysCRImm.Val;
570  }
571 
572  unsigned getPrefetch() const {
573  assert(Kind == k_Prefetch && "Invalid access!");
574  return Prefetch.Val;
575  }
576 
577  unsigned getPSBHint() const {
578  assert(Kind == k_PSBHint && "Invalid access!");
579  return PSBHint.Val;
580  }
581 
582  StringRef getPSBHintName() const {
583  assert(Kind == k_PSBHint && "Invalid access!");
584  return StringRef(PSBHint.Data, PSBHint.Length);
585  }
586 
587  unsigned getBTIHint() const {
588  assert(Kind == k_BTIHint && "Invalid access!");
589  return BTIHint.Val;
590  }
591 
592  StringRef getBTIHintName() const {
593  assert(Kind == k_BTIHint && "Invalid access!");
594  return StringRef(BTIHint.Data, BTIHint.Length);
595  }
596 
597  StringRef getPrefetchName() const {
598  assert(Kind == k_Prefetch && "Invalid access!");
599  return StringRef(Prefetch.Data, Prefetch.Length);
600  }
601 
// The shift/extend accessors accept either a standalone k_ShiftExtend
// operand or a k_Register operand carrying an embedded shift/extend.
602  AArch64_AM::ShiftExtendType getShiftExtendType() const {
603  if (Kind == k_ShiftExtend)
604  return ShiftExtend.Type;
605  if (Kind == k_Register)
606  return Reg.ShiftExtend.Type;
607  llvm_unreachable("Invalid access!");
608  }
609 
610  unsigned getShiftExtendAmount() const {
611  if (Kind == k_ShiftExtend)
612  return ShiftExtend.Amount;
613  if (Kind == k_Register)
614  return Reg.ShiftExtend.Amount;
615  llvm_unreachable("Invalid access!");
616  }
617 
618  bool hasShiftExtendAmount() const {
619  if (Kind == k_ShiftExtend)
620  return ShiftExtend.HasExplicitAmount;
621  if (Kind == k_Register)
622  return Reg.ShiftExtend.HasExplicitAmount;
623  llvm_unreachable("Invalid access!");
624  }
625 
626  bool isImm() const override { return Kind == k_Immediate; }
// AArch64 operands are never memory operands at this level.
627  bool isMem() const override { return false; }
628 
// Constant immediate in [0, 64).
629  bool isUImm6() const {
630  if (!isImm())
631  return false;
632  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
633  if (!MCE)
634  return false;
635  int64_t Val = MCE->getValue();
636  return (Val >= 0 && Val < 64);
637  }
638 
// Signed Width-bit immediate (unscaled convenience wrapper).
639  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
640 
641  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
642  return isImmScaled<Bits, Scale>(true);
643  }
644 
645  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
646  return isImmScaled<Bits, Scale>(false);
647  }
648 
// Checks that the operand is a constant multiple of Scale within the
// (signed or unsigned) Bits-wide scaled range.
// NOTE(review): the DiagnosticPredicate return statements (original lines
// 652, 656/657, 670/671, 672/673) are missing from this capture; confirm
// upstream.
649  template <int Bits, int Scale>
650  DiagnosticPredicate isImmScaled(bool Signed) const {
651  if (!isImm())
653 
654  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
655  if (!MCE)
657 
658  int64_t MinVal, MaxVal;
659  if (Signed) {
660  int64_t Shift = Bits - 1;
661  MinVal = (int64_t(1) << Shift) * -Scale;
662  MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
663  } else {
664  MinVal = 0;
665  MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
666  }
667 
668  int64_t Val = MCE->getValue();
669  if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
671 
673  }
674 
// Constant in [0, 32), the encodable SVE predicate-pattern range.
// NOTE(review): the return statements (original lines 677, 680, 683-684)
// are missing from this capture; confirm upstream.
675  DiagnosticPredicate isSVEPattern() const {
676  if (!isImm())
678  auto *MCE = dyn_cast<MCConstantExpr>(getImm());
679  if (!MCE)
681  int64_t Val = MCE->getValue();
682  if (Val >= 0 && Val < 32)
685  }
686 
687  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
688  AArch64MCExpr::VariantKind ELFRefKind;
689  MCSymbolRefExpr::VariantKind DarwinRefKind;
690  int64_t Addend;
691  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
692  Addend)) {
693  // If we don't understand the expression, assume the best and
694  // let the fixup and relocation code deal with it.
695  return true;
696  }
697 
698  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
699  ELFRefKind == AArch64MCExpr::VK_LO12 ||
700  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
701  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
702  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
703  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
704  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
705  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
706  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
707  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
708  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
709  // Note that we don't range-check the addend. It's adjusted modulo page
710  // size when converted, so there is no "out of range" condition when using
711  // @pageoff.
712  return true;
713  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
714  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
715  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
716  return Addend == 0;
717  }
718 
719  return false;
720  }
721 
722  template <int Scale> bool isUImm12Offset() const {
723  if (!isImm())
724  return false;
725 
726  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
727  if (!MCE)
728  return isSymbolicUImm12Offset(getImm());
729 
730  int64_t Val = MCE->getValue();
731  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
732  }
733 
734  template <int N, int M>
735  bool isImmInRange() const {
736  if (!isImm())
737  return false;
738  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
739  if (!MCE)
740  return false;
741  int64_t Val = MCE->getValue();
742  return (Val >= N && Val <= M);
743  }
744 
745  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
746  // a logical immediate can always be represented when inverted.
// True if the constant fits a bitmask-immediate encoding for a sizeof(T)*8
// bit operation. The SVal/UVal comparison rejects constants that do not fit
// in T under either signed or unsigned interpretation.
747  template <typename T>
748  bool isLogicalImm() const {
749  if (!isImm())
750  return false;
751  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
752  if (!MCE)
753  return false;
754 
755  int64_t Val = MCE->getValue();
756  int64_t SVal = typename std::make_signed<T>::type(Val);
757  int64_t UVal = typename std::make_unsigned<T>::type(Val);
758  if (Val != SVal && Val != UVal)
759  return false;
760 
761  return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
762  }
763 
764  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
765 
766  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
767  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
768  /// immediate that can be shifted by 'Shift'.
769  template <unsigned Width>
770  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
// Case 1: already parsed as "imm, lsl #Width".
771  if (isShiftedImm() && Width == getShiftedImmShift())
772  if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
773  return std::make_pair(CE->getValue(), Width);
774 
// Case 2: plain constant; shift it right by Width if that loses no bits,
// otherwise report it unshifted.
775  if (isImm())
776  if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
777  int64_t Val = CE->getValue();
778  if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
779  return std::make_pair(Val >> Width, Width);
780  else
781  return std::make_pair(Val, 0u);
782  }
783 
784  return {};
785  }
786 
// True if the operand can be encoded as an ADD/SUB immediate: a 12-bit
// unsigned constant optionally shifted by 'lsl #12', or one of the symbolic
// :lo12:/:hi12:-style modifiers that relocate into that field.
787  bool isAddSubImm() const {
788  if (!isShiftedImm() && !isImm())
789  return false;
790 
791  const MCExpr *Expr;
792 
793  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
794  if (isShiftedImm()) {
795  unsigned Shift = ShiftedImm.ShiftAmount;
796  Expr = ShiftedImm.Val;
797  if (Shift != 0 && Shift != 12)
798  return false;
799  } else {
800  Expr = getImm();
801  }
802 
// Symbolic operand: accept exactly the modifiers that relocate to the
// ADD/SUB immediate field.
803  AArch64MCExpr::VariantKind ELFRefKind;
804  MCSymbolRefExpr::VariantKind DarwinRefKind;
805  int64_t Addend;
806  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
807  DarwinRefKind, Addend)) {
808  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
809  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
810  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
811  || ELFRefKind == AArch64MCExpr::VK_LO12
812  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
813  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
814  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
815  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
816  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
817  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
818  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
819  || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
820  || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
821  }
822 
823  // If it's a constant, it should be a real immediate in range.
824  if (auto ShiftedVal = getShiftedVal<12>())
825  return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
826 
827  // If it's an expression, we hope for the best and let the fixup/relocation
828  // code deal with it.
829  return true;
830  }
831 
832  bool isAddSubImmNeg() const {
833  if (!isShiftedImm() && !isImm())
834  return false;
835 
836  // Otherwise it should be a real negative immediate in range.
837  if (auto ShiftedVal = getShiftedVal<12>())
838  return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
839 
840  return false;
841  }
842 
843  // Signed value in the range -128 to +127. For element widths of
844  // 16 bits or higher it may also be a signed multiple of 256 in the
845  // range -32768 to +32512.
846  // For element-width of 8 bits a range of -128 to 255 is accepted,
847  // since a copy of a byte can be either signed/unsigned.
// NOTE(review): this method's signature line (original line 849, presumably
// "DiagnosticPredicate isSVECpyImm() const {") and its return statements
// (original lines 851, 859, 861) are missing from this capture; confirm
// upstream.
848  template <typename T>
850  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
852 
853  bool IsByte =
854  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
// Byte elements cannot use the 'lsl #8' shifted form.
855  if (auto ShiftedImm = getShiftedVal<8>())
856  if (!(IsByte && ShiftedImm->second) &&
857  AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
858  << ShiftedImm->second))
860 
862  }
863 
864  // Unsigned value in the range 0 to 255. For element widths of
865  // 16 bits or higher it may also be a signed multiple of 256 in the
866  // range 0 to 65280.
// NOTE(review): the return statements (original lines 869, 877, 879) are
// missing from this capture; confirm upstream.
867  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
868  if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
870 
871  bool IsByte =
872  std::is_same<int8_t, typename std::make_signed<T>::type>::value;
873  if (auto ShiftedImm = getShiftedVal<8>())
874  if (!(IsByte && ShiftedImm->second) &&
875  AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
876  << ShiftedImm->second))
878 
880  }
881 
// Logical immediates that are NOT also CPY immediates; used to prefer the
// logical-imm form where both could match.
// NOTE(review): the return statements (original lines 884-885) are missing
// from this capture; confirm upstream.
882  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
883  if (isLogicalImm<T>() && !isSVECpyImm<T>())
886  }
887 
888  bool isCondCode() const { return Kind == k_CondCode; }
889 
// NOTE(review): the final return (original line 896, presumably a call to
// AArch64_AM::isAdvSIMDModImmType10) is missing from this capture; confirm
// upstream.
890  bool isSIMDImmType10() const {
891  if (!isImm())
892  return false;
893  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
894  if (!MCE)
895  return false;
897  }
898 
// True if the operand is usable as an N-bit branch target: non-constant
// expressions are accepted (resolved by fixups), constants must be 4-byte
// aligned and fit in N bits after the implicit >>2 scaling.
899  template<int N>
900  bool isBranchTarget() const {
901  if (!isImm())
902  return false;
903  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
904  if (!MCE)
// Symbolic target: let the fixup machinery range-check it.
905  return true;
906  int64_t Val = MCE->getValue();
907  if (Val & 0x3)
908  return false;
909  assert(N > 0 && "Branch target immediate cannot be 0 bits!");
// Encodable range is [-2^(N-1), 2^(N-1)-1] instruction words, i.e. <<2 bytes.
910  return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
911  }
912 
913  bool
914  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
915  if (!isImm())
916  return false;
917 
918  AArch64MCExpr::VariantKind ELFRefKind;
919  MCSymbolRefExpr::VariantKind DarwinRefKind;
920  int64_t Addend;
921  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
922  DarwinRefKind, Addend)) {
923  return false;
924  }
925  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
926  return false;
927 
928  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
929  if (ELFRefKind == AllowedModifiers[i])
930  return true;
931  }
932 
933  return false;
934  }
935 
// Per-halfword MOVZ/MOVK symbol predicates: each accepts the MOVW-style
// modifiers valid for that 16-bit chunk (G0..G3).
936  bool isMovZSymbolG3() const {
937  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
938  }
939 
// NOTE(review): the argument lists of several of these predicates (original
// lines 941-943, 948-950, 955-957, 970-971, 976-977) are missing from this
// capture; confirm the exact modifier sets upstream.
940  bool isMovZSymbolG2() const {
944  }
945 
946  bool isMovZSymbolG1() const {
947  return isMovWSymbol({
951  });
952  }
953 
954  bool isMovZSymbolG0() const {
958  }
959 
960  bool isMovKSymbolG3() const {
961  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
962  }
963 
964  bool isMovKSymbolG2() const {
965  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
966  }
967 
968  bool isMovKSymbolG1() const {
969  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
972  }
973 
974  bool isMovKSymbolG0() const {
975  return isMovWSymbol(
978  }
979 
980  template<int RegWidth, int Shift>
981  bool isMOVZMovAlias() const {
982  if (!isImm()) return false;
983 
984  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
985  if (!CE) return false;
986  uint64_t Value = CE->getValue();
987 
988  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
989  }
990 
991  template<int RegWidth, int Shift>
992  bool isMOVNMovAlias() const {
993  if (!isImm()) return false;
994 
995  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
996  if (!CE) return false;
997  uint64_t Value = CE->getValue();
998 
999  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1000  }
1001 
// True only if the FP value is encodable as an 8-bit AdvSIMD FP immediate.
1002  bool isFPImm() const {
1003  return Kind == k_FPImm &&
1004  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1005  }
1006 
1007  bool isBarrier() const { return Kind == k_Barrier; }
1008  bool isSysReg() const { return Kind == k_SysReg; }
1009 
// -1U marks "no encoding" for the MRS/MSR/PState fields below.
1010  bool isMRSSystemRegister() const {
1011  if (!isSysReg()) return false;
1012 
1013  return SysReg.MRSReg != -1U;
1014  }
1015 
1016  bool isMSRSystemRegister() const {
1017  if (!isSysReg()) return false;
1018  return SysReg.MSRReg != -1U;
1019  }
1020 
// PState fields whose MSR-immediate form takes only a 1-bit value.
1021  bool isSystemPStateFieldWithImm0_1() const {
1022  if (!isSysReg()) return false;
1023  return (SysReg.PStateField == AArch64PState::PAN ||
1024  SysReg.PStateField == AArch64PState::DIT ||
1025  SysReg.PStateField == AArch64PState::UAO ||
1026  SysReg.PStateField == AArch64PState::SSBS);
1027  }
1028 
// All remaining PState fields take a 4-bit immediate.
1029  bool isSystemPStateFieldWithImm0_15() const {
1030  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1031  return SysReg.PStateField != -1U;
1032  }
1033 
1034  bool isReg() const override {
1035  return Kind == k_Register;
1036  }
1037 
1038  bool isScalarReg() const {
1039  return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1040  }
1041 
1042  bool isNeonVectorReg() const {
1043  return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1044  }
1045 
// NEON register restricted to the lower half of the register file
// (V0-V15), as required by some indexed-element instructions.
1046  bool isNeonVectorRegLo() const {
1047  return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1048  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1049  Reg.RegNum);
1050  }
1051 
1052  template <unsigned Class> bool isSVEVectorReg() const {
1053  RegKind RK;
1054  switch (Class) {
1055  case AArch64::ZPRRegClassID:
1056  case AArch64::ZPR_3bRegClassID:
1057  case AArch64::ZPR_4bRegClassID:
1058  RK = RegKind::SVEDataVector;
1059  break;
1060  case AArch64::PPRRegClassID:
1061  case AArch64::PPR_3bRegClassID:
1062  RK = RegKind::SVEPredicateVector;
1063  break;
1064  default:
1065  llvm_unreachable("Unsupport register class");
1066  }
1067 
1068  return (Kind == k_Register && Reg.Kind == RK) &&
1069  AArch64MCRegisterClasses[Class].contains(getReg());
1070  }
1071 
// Scalar FP register used where a ZPR is expected (FPR-as-ZPR aliasing).
1072  template <unsigned Class> bool isFPRasZPR() const {
1073  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1074  AArch64MCRegisterClasses[Class].contains(getReg());
1075  }
1076 
// SVE predicate register with a specific element width (0 = any width).
// NOTE(review): the DiagnosticPredicate return statements (original lines
// 1080, 1084, 1086) are missing from this capture; confirm upstream.
1077  template <int ElementWidth, unsigned Class>
1078  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1079  if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1081 
1082  if (isSVEVectorReg<Class>() &&
1083  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1085 
1087  }
1088 
// SVE data-vector register with a specific element width (0 = any width).
// NOTE(review): the return statements (original lines 1092/1093, 1096,
// 1098) are missing from this capture; confirm upstream.
1089  template <int ElementWidth, unsigned Class>
1090  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1091  if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1093 
1094  if (isSVEVectorReg<Class>() &&
1095  (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1097 
1099  }
1100 
// SVE data vector plus an embedded shift/extend, as used by scaled
// register-offset addressing modes.
// NOTE(review): the return statements (original lines 1107, 1116/1117,
// 1119, 1121) are missing from this capture; confirm upstream.
1101  template <int ElementWidth, unsigned Class,
1102  AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1103  bool ShiftWidthAlwaysSame>
1104  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1105  auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1106  if (!VectorMatch.isMatch())
1108 
1109  // Give a more specific diagnostic when the user has explicitly typed in
1110  // a shift-amount that does not match what is expected, but for which
1111  // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1112  bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1113  if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1114  ShiftExtendTy == AArch64_AM::SXTW) &&
1115  !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1117 
1118  if (MatchShift && ShiftExtendTy == getShiftExtendType())
1120 
1122  }
1123 
// 64-bit register name written where a 32-bit register is encoded (and
// vice versa below); matched via the EqualsSubReg/EqualsSuperReg machinery.
1124  bool isGPR32as64() const {
1125  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1126  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1127  }
1128 
1129  bool isGPR64as32() const {
1130  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1131  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1132  }
1133 
1134  bool isWSeqPair() const {
1135  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1136  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1137  Reg.RegNum);
1138  }
1139 
1140  bool isXSeqPair() const {
1141  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1142  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1143  Reg.RegNum);
1144  }
1145 
1146  template<int64_t Angle, int64_t Remainder>
1147  DiagnosticPredicate isComplexRotation() const {
1148  if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1149 
1150  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1151  if (!CE) return DiagnosticPredicateTy::NoMatch;
1152  uint64_t Value = CE->getValue();
1153 
1154  if (Value % Angle == Remainder && Value <= 270)
1157  }
1158 
1159  template <unsigned RegClassID> bool isGPR64() const {
1160  return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1161  AArch64MCRegisterClasses[RegClassID].contains(getReg());
1162  }
1163 
1164  template <unsigned RegClassID, int ExtWidth>
1165  DiagnosticPredicate isGPR64WithShiftExtend() const {
1166  if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1168 
1169  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1170  getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1173  }
1174 
1175  /// Is this a vector list with the type implicit (presumably attached to the
1176  /// instruction itself)?
1177  template <RegKind VectorKind, unsigned NumRegs>
1178  bool isImplicitlyTypedVectorList() const {
1179  return Kind == k_VectorList && VectorList.Count == NumRegs &&
1180  VectorList.NumElements == 0 &&
1181  VectorList.RegisterKind == VectorKind;
1182  }
1183 
1184  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1185  unsigned ElementWidth>
1186  bool isTypedVectorList() const {
1187  if (Kind != k_VectorList)
1188  return false;
1189  if (VectorList.Count != NumRegs)
1190  return false;
1191  if (VectorList.RegisterKind != VectorKind)
1192  return false;
1193  if (VectorList.ElementWidth != ElementWidth)
1194  return false;
1195  return VectorList.NumElements == NumElements;
1196  }
1197 
1198  template <int Min, int Max>
1199  DiagnosticPredicate isVectorIndex() const {
1200  if (Kind != k_VectorIndex)
1202  if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1205  }
1206 
1207  bool isToken() const override { return Kind == k_Token; }
1208 
1209  bool isTokenEqual(StringRef Str) const {
1210  return Kind == k_Token && getToken() == Str;
1211  }
1212  bool isSysCR() const { return Kind == k_SysCR; }
1213  bool isPrefetch() const { return Kind == k_Prefetch; }
1214  bool isPSBHint() const { return Kind == k_PSBHint; }
1215  bool isBTIHint() const { return Kind == k_BTIHint; }
1216  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1217  bool isShifter() const {
1218  if (!isShiftExtend())
1219  return false;
1220 
1221  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1222  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1223  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1224  ST == AArch64_AM::MSL);
1225  }
1226 
1227  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1228  if (Kind != k_FPImm)
1230 
1231  if (getFPImmIsExact()) {
1232  // Lookup the immediate from table of supported immediates.
1233  auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1234  assert(Desc && "Unknown enum value");
1235 
1236  // Calculate its FP value.
1237  APFloat RealVal(APFloat::IEEEdouble());
1238  if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1239  APFloat::opOK)
1240  llvm_unreachable("FP immediate is not exact");
1241 
1242  if (getFPImm().bitwiseIsEqual(RealVal))
1244  }
1245 
1247  }
1248 
1249  template <unsigned ImmA, unsigned ImmB>
1250  DiagnosticPredicate isExactFPImm() const {
1252  if ((Res = isExactFPImm<ImmA>()))
1254  if ((Res = isExactFPImm<ImmB>()))
1256  return Res;
1257  }
1258 
1259  bool isExtend() const {
1260  if (!isShiftExtend())
1261  return false;
1262 
1263  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1264  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1265  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1266  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1267  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1268  ET == AArch64_AM::LSL) &&
1269  getShiftExtendAmount() <= 4;
1270  }
1271 
1272  bool isExtend64() const {
1273  if (!isExtend())
1274  return false;
1275  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1276  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1277  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1278  }
1279 
1280  bool isExtendLSL64() const {
1281  if (!isExtend())
1282  return false;
1283  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1284  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1285  ET == AArch64_AM::LSL) &&
1286  getShiftExtendAmount() <= 4;
1287  }
1288 
1289  template<int Width> bool isMemXExtend() const {
1290  if (!isExtend())
1291  return false;
1292  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1293  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1294  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1295  getShiftExtendAmount() == 0);
1296  }
1297 
1298  template<int Width> bool isMemWExtend() const {
1299  if (!isExtend())
1300  return false;
1301  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1302  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1303  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1304  getShiftExtendAmount() == 0);
1305  }
1306 
1307  template <unsigned width>
1308  bool isArithmeticShifter() const {
1309  if (!isShifter())
1310  return false;
1311 
1312  // An arithmetic shifter is LSL, LSR, or ASR.
1313  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1314  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1315  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1316  }
1317 
1318  template <unsigned width>
1319  bool isLogicalShifter() const {
1320  if (!isShifter())
1321  return false;
1322 
1323  // A logical shifter is LSL, LSR, ASR or ROR.
1324  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1325  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1326  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1327  getShiftExtendAmount() < width;
1328  }
1329 
1330  bool isMovImm32Shifter() const {
1331  if (!isShifter())
1332  return false;
1333 
1334  // A MOVi shifter is LSL of 0, 16, 32, or 48.
1335  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1336  if (ST != AArch64_AM::LSL)
1337  return false;
1338  uint64_t Val = getShiftExtendAmount();
1339  return (Val == 0 || Val == 16);
1340  }
1341 
1342  bool isMovImm64Shifter() const {
1343  if (!isShifter())
1344  return false;
1345 
1346  // A MOVi shifter is LSL of 0 or 16.
1347  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1348  if (ST != AArch64_AM::LSL)
1349  return false;
1350  uint64_t Val = getShiftExtendAmount();
1351  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1352  }
1353 
1354  bool isLogicalVecShifter() const {
1355  if (!isShifter())
1356  return false;
1357 
1358  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1359  unsigned Shift = getShiftExtendAmount();
1360  return getShiftExtendType() == AArch64_AM::LSL &&
1361  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1362  }
1363 
1364  bool isLogicalVecHalfWordShifter() const {
1365  if (!isLogicalVecShifter())
1366  return false;
1367 
1368  // A logical vector shifter is a left shift by 0 or 8.
1369  unsigned Shift = getShiftExtendAmount();
1370  return getShiftExtendType() == AArch64_AM::LSL &&
1371  (Shift == 0 || Shift == 8);
1372  }
1373 
1374  bool isMoveVecShifter() const {
1375  if (!isShiftExtend())
1376  return false;
1377 
1378  // A logical vector shifter is a left shift by 8 or 16.
1379  unsigned Shift = getShiftExtendAmount();
1380  return getShiftExtendType() == AArch64_AM::MSL &&
1381  (Shift == 8 || Shift == 16);
1382  }
1383 
1384  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1385  // to LDUR/STUR when the offset is not legal for the former but is for
1386  // the latter. As such, in addition to checking for being a legal unscaled
1387  // address, also check that it is not a legal scaled address. This avoids
1388  // ambiguity in the matcher.
1389  template<int Width>
1390  bool isSImm9OffsetFB() const {
1391  return isSImm<9>() && !isUImm12Offset<Width / 8>();
1392  }
1393 
1394  bool isAdrpLabel() const {
1395  // Validation was handled during parsing, so we just sanity check that
1396  // something didn't go haywire.
1397  if (!isImm())
1398  return false;
1399 
1400  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1401  int64_t Val = CE->getValue();
1402  int64_t Min = - (4096 * (1LL << (21 - 1)));
1403  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1404  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1405  }
1406 
1407  return true;
1408  }
1409 
1410  bool isAdrLabel() const {
1411  // Validation was handled during parsing, so we just sanity check that
1412  // something didn't go haywire.
1413  if (!isImm())
1414  return false;
1415 
1416  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1417  int64_t Val = CE->getValue();
1418  int64_t Min = - (1LL << (21 - 1));
1419  int64_t Max = ((1LL << (21 - 1)) - 1);
1420  return Val >= Min && Val <= Max;
1421  }
1422 
1423  return true;
1424  }
1425 
1426  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1427  // Add as immediates when possible. Null MCExpr = 0.
1428  if (!Expr)
1430  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1431  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1432  else
1433  Inst.addOperand(MCOperand::createExpr(Expr));
1434  }
1435 
1436  void addRegOperands(MCInst &Inst, unsigned N) const {
1437  assert(N == 1 && "Invalid number of operands!");
1439  }
1440 
1441  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1442  assert(N == 1 && "Invalid number of operands!");
1443  assert(
1444  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1445 
1446  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1447  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1448  RI->getEncodingValue(getReg()));
1449 
1450  Inst.addOperand(MCOperand::createReg(Reg));
1451  }
1452 
1453  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1454  assert(N == 1 && "Invalid number of operands!");
1455  assert(
1456  AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1457 
1458  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1459  uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1460  RI->getEncodingValue(getReg()));
1461 
1462  Inst.addOperand(MCOperand::createReg(Reg));
1463  }
1464 
1465  template <int Width>
1466  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1467  unsigned Base;
1468  switch (Width) {
1469  case 8: Base = AArch64::B0; break;
1470  case 16: Base = AArch64::H0; break;
1471  case 32: Base = AArch64::S0; break;
1472  case 64: Base = AArch64::D0; break;
1473  case 128: Base = AArch64::Q0; break;
1474  default:
1475  llvm_unreachable("Unsupported width");
1476  }
1477  Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1478  }
1479 
1480  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1481  assert(N == 1 && "Invalid number of operands!");
1482  assert(
1483  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1484  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1485  }
1486 
1487  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1488  assert(N == 1 && "Invalid number of operands!");
1489  assert(
1490  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1492  }
1493 
1494  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1495  assert(N == 1 && "Invalid number of operands!");
1497  }
1498 
1499  enum VecListIndexType {
1500  VecListIdx_DReg = 0,
1501  VecListIdx_QReg = 1,
1502  VecListIdx_ZReg = 2,
1503  };
1504 
1505  template <VecListIndexType RegTy, unsigned NumRegs>
1506  void addVectorListOperands(MCInst &Inst, unsigned N) const {
1507  assert(N == 1 && "Invalid number of operands!");
1508  static const unsigned FirstRegs[][5] = {
1509  /* DReg */ { AArch64::Q0,
1510  AArch64::D0, AArch64::D0_D1,
1511  AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1512  /* QReg */ { AArch64::Q0,
1513  AArch64::Q0, AArch64::Q0_Q1,
1514  AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1515  /* ZReg */ { AArch64::Z0,
1516  AArch64::Z0, AArch64::Z0_Z1,
1517  AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1518  };
1519 
1520  assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1521  " NumRegs must be <= 4 for ZRegs");
1522 
1523  unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1524  Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1525  FirstRegs[(unsigned)RegTy][0]));
1526  }
1527 
1528  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1529  assert(N == 1 && "Invalid number of operands!");
1530  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1531  }
1532 
1533  template <unsigned ImmIs0, unsigned ImmIs1>
1534  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1535  assert(N == 1 && "Invalid number of operands!");
1536  assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1537  Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1538  }
1539 
1540  void addImmOperands(MCInst &Inst, unsigned N) const {
1541  assert(N == 1 && "Invalid number of operands!");
1542  // If this is a pageoff symrefexpr with an addend, adjust the addend
1543  // to be only the page-offset portion. Otherwise, just add the expr
1544  // as-is.
1545  addExpr(Inst, getImm());
1546  }
1547 
1548  template <int Shift>
1549  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1550  assert(N == 2 && "Invalid number of operands!");
1551  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1552  Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1553  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1554  } else if (isShiftedImm()) {
1555  addExpr(Inst, getShiftedImmVal());
1556  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1557  } else {
1558  addExpr(Inst, getImm());
1560  }
1561  }
1562 
1563  template <int Shift>
1564  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1565  assert(N == 2 && "Invalid number of operands!");
1566  if (auto ShiftedVal = getShiftedVal<Shift>()) {
1567  Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1568  Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1569  } else
1570  llvm_unreachable("Not a shifted negative immediate");
1571  }
1572 
1573  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1574  assert(N == 1 && "Invalid number of operands!");
1576  }
1577 
1578  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1579  assert(N == 1 && "Invalid number of operands!");
1580  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1581  if (!MCE)
1582  addExpr(Inst, getImm());
1583  else
1584  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1585  }
1586 
1587  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1588  addImmOperands(Inst, N);
1589  }
1590 
1591  template<int Scale>
1592  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1593  assert(N == 1 && "Invalid number of operands!");
1594  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1595 
1596  if (!MCE) {
1597  Inst.addOperand(MCOperand::createExpr(getImm()));
1598  return;
1599  }
1600  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1601  }
1602 
1603  void addUImm6Operands(MCInst &Inst, unsigned N) const {
1604  assert(N == 1 && "Invalid number of operands!");
1605  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1607  }
1608 
1609  template <int Scale>
1610  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1611  assert(N == 1 && "Invalid number of operands!");
1612  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1613  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1614  }
1615 
1616  template <typename T>
1617  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1618  assert(N == 1 && "Invalid number of operands!");
1619  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1620  typename std::make_unsigned<T>::type Val = MCE->getValue();
1621  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1622  Inst.addOperand(MCOperand::createImm(encoding));
1623  }
1624 
1625  template <typename T>
1626  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1627  assert(N == 1 && "Invalid number of operands!");
1628  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1629  typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1630  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1631  Inst.addOperand(MCOperand::createImm(encoding));
1632  }
1633 
1634  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1635  assert(N == 1 && "Invalid number of operands!");
1636  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1637  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1638  Inst.addOperand(MCOperand::createImm(encoding));
1639  }
1640 
1641  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1642  // Branch operands don't encode the low bits, so shift them off
1643  // here. If it's a label, however, just put it on directly as there's
1644  // not enough information now to do anything.
1645  assert(N == 1 && "Invalid number of operands!");
1646  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1647  if (!MCE) {
1648  addExpr(Inst, getImm());
1649  return;
1650  }
1651  assert(MCE && "Invalid constant immediate operand!");
1652  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1653  }
1654 
1655  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1656  // Branch operands don't encode the low bits, so shift them off
1657  // here. If it's a label, however, just put it on directly as there's
1658  // not enough information now to do anything.
1659  assert(N == 1 && "Invalid number of operands!");
1660  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1661  if (!MCE) {
1662  addExpr(Inst, getImm());
1663  return;
1664  }
1665  assert(MCE && "Invalid constant immediate operand!");
1666  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1667  }
1668 
1669  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1670  // Branch operands don't encode the low bits, so shift them off
1671  // here. If it's a label, however, just put it on directly as there's
1672  // not enough information now to do anything.
1673  assert(N == 1 && "Invalid number of operands!");
1674  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1675  if (!MCE) {
1676  addExpr(Inst, getImm());
1677  return;
1678  }
1679  assert(MCE && "Invalid constant immediate operand!");
1680  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1681  }
1682 
1683  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1684  assert(N == 1 && "Invalid number of operands!");
1686  AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1687  }
1688 
1689  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1690  assert(N == 1 && "Invalid number of operands!");
1691  Inst.addOperand(MCOperand::createImm(getBarrier()));
1692  }
1693 
1694  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1695  assert(N == 1 && "Invalid number of operands!");
1696 
1697  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1698  }
1699 
1700  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1701  assert(N == 1 && "Invalid number of operands!");
1702 
1703  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1704  }
1705 
1706  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1707  assert(N == 1 && "Invalid number of operands!");
1708 
1709  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1710  }
1711 
1712  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1713  assert(N == 1 && "Invalid number of operands!");
1714 
1715  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1716  }
1717 
1718  void addSysCROperands(MCInst &Inst, unsigned N) const {
1719  assert(N == 1 && "Invalid number of operands!");
1720  Inst.addOperand(MCOperand::createImm(getSysCR()));
1721  }
1722 
1723  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1724  assert(N == 1 && "Invalid number of operands!");
1725  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1726  }
1727 
1728  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1729  assert(N == 1 && "Invalid number of operands!");
1730  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1731  }
1732 
1733  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1734  assert(N == 1 && "Invalid number of operands!");
1735  Inst.addOperand(MCOperand::createImm(getBTIHint()));
1736  }
1737 
1738  void addShifterOperands(MCInst &Inst, unsigned N) const {
1739  assert(N == 1 && "Invalid number of operands!");
1740  unsigned Imm =
1741  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1742  Inst.addOperand(MCOperand::createImm(Imm));
1743  }
1744 
1745  void addExtendOperands(MCInst &Inst, unsigned N) const {
1746  assert(N == 1 && "Invalid number of operands!");
1747  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1748  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1749  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1750  Inst.addOperand(MCOperand::createImm(Imm));
1751  }
1752 
1753  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1754  assert(N == 1 && "Invalid number of operands!");
1755  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1756  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1757  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1758  Inst.addOperand(MCOperand::createImm(Imm));
1759  }
1760 
1761  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1762  assert(N == 2 && "Invalid number of operands!");
1763  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1764  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1765  Inst.addOperand(MCOperand::createImm(IsSigned));
1766  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1767  }
1768 
1769  // For 8-bit load/store instructions with a register offset, both the
1770  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1771  // they're disambiguated by whether the shift was explicit or implicit rather
1772  // than its size.
1773  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1774  assert(N == 2 && "Invalid number of operands!");
1775  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1776  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1777  Inst.addOperand(MCOperand::createImm(IsSigned));
1778  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1779  }
1780 
1781  template<int Shift>
1782  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1783  assert(N == 1 && "Invalid number of operands!");
1784 
1785  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1786  uint64_t Value = CE->getValue();
1787  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1788  }
1789 
1790  template<int Shift>
1791  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1792  assert(N == 1 && "Invalid number of operands!");
1793 
1794  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1795  uint64_t Value = CE->getValue();
1796  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1797  }
1798 
1799  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1800  assert(N == 1 && "Invalid number of operands!");
1801  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1802  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1803  }
1804 
1805  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1806  assert(N == 1 && "Invalid number of operands!");
1807  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1808  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1809  }
1810 
1811  void print(raw_ostream &OS) const override;
1812 
1813  static std::unique_ptr<AArch64Operand>
1814  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1815  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1816  Op->Tok.Data = Str.data();
1817  Op->Tok.Length = Str.size();
1818  Op->Tok.IsSuffix = IsSuffix;
1819  Op->StartLoc = S;
1820  Op->EndLoc = S;
1821  return Op;
1822  }
1823 
1824  static std::unique_ptr<AArch64Operand>
1825  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1826  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1828  unsigned ShiftAmount = 0,
1829  unsigned HasExplicitAmount = false) {
1830  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1831  Op->Reg.RegNum = RegNum;
1832  Op->Reg.Kind = Kind;
1833  Op->Reg.ElementWidth = 0;
1834  Op->Reg.EqualityTy = EqTy;
1835  Op->Reg.ShiftExtend.Type = ExtTy;
1836  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1837  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1838  Op->StartLoc = S;
1839  Op->EndLoc = E;
1840  return Op;
1841  }
1842 
1843  static std::unique_ptr<AArch64Operand>
1844  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1845  SMLoc S, SMLoc E, MCContext &Ctx,
1847  unsigned ShiftAmount = 0,
1848  unsigned HasExplicitAmount = false) {
1849  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1850  Kind == RegKind::SVEPredicateVector) &&
1851  "Invalid vector kind");
1852  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1853  HasExplicitAmount);
1854  Op->Reg.ElementWidth = ElementWidth;
1855  return Op;
1856  }
1857 
1858  static std::unique_ptr<AArch64Operand>
1859  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1860  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1861  MCContext &Ctx) {
1862  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1863  Op->VectorList.RegNum = RegNum;
1864  Op->VectorList.Count = Count;
1865  Op->VectorList.NumElements = NumElements;
1866  Op->VectorList.ElementWidth = ElementWidth;
1867  Op->VectorList.RegisterKind = RegisterKind;
1868  Op->StartLoc = S;
1869  Op->EndLoc = E;
1870  return Op;
1871  }
1872 
1873  static std::unique_ptr<AArch64Operand>
1874  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1875  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1876  Op->VectorIndex.Val = Idx;
1877  Op->StartLoc = S;
1878  Op->EndLoc = E;
1879  return Op;
1880  }
1881 
1882  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1883  SMLoc E, MCContext &Ctx) {
1884  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1885  Op->Imm.Val = Val;
1886  Op->StartLoc = S;
1887  Op->EndLoc = E;
1888  return Op;
1889  }
1890 
1891  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1892  unsigned ShiftAmount,
1893  SMLoc S, SMLoc E,
1894  MCContext &Ctx) {
1895  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1896  Op->ShiftedImm .Val = Val;
1897  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1898  Op->StartLoc = S;
1899  Op->EndLoc = E;
1900  return Op;
1901  }
1902 
1903  static std::unique_ptr<AArch64Operand>
1904  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1905  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1906  Op->CondCode.Code = Code;
1907  Op->StartLoc = S;
1908  Op->EndLoc = E;
1909  return Op;
1910  }
1911 
1912  static std::unique_ptr<AArch64Operand>
1913  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1914  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1915  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1916  Op->FPImm.IsExact = IsExact;
1917  Op->StartLoc = S;
1918  Op->EndLoc = S;
1919  return Op;
1920  }
1921 
1922  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1923  StringRef Str,
1924  SMLoc S,
1925  MCContext &Ctx) {
1926  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1927  Op->Barrier.Val = Val;
1928  Op->Barrier.Data = Str.data();
1929  Op->Barrier.Length = Str.size();
1930  Op->StartLoc = S;
1931  Op->EndLoc = S;
1932  return Op;
1933  }
1934 
1935  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1936  uint32_t MRSReg,
1937  uint32_t MSRReg,
1938  uint32_t PStateField,
1939  MCContext &Ctx) {
1940  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1941  Op->SysReg.Data = Str.data();
1942  Op->SysReg.Length = Str.size();
1943  Op->SysReg.MRSReg = MRSReg;
1944  Op->SysReg.MSRReg = MSRReg;
1945  Op->SysReg.PStateField = PStateField;
1946  Op->StartLoc = S;
1947  Op->EndLoc = S;
1948  return Op;
1949  }
1950 
1951  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1952  SMLoc E, MCContext &Ctx) {
1953  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1954  Op->SysCRImm.Val = Val;
1955  Op->StartLoc = S;
1956  Op->EndLoc = E;
1957  return Op;
1958  }
1959 
1960  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1961  StringRef Str,
1962  SMLoc S,
1963  MCContext &Ctx) {
1964  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1965  Op->Prefetch.Val = Val;
1966  Op->Barrier.Data = Str.data();
1967  Op->Barrier.Length = Str.size();
1968  Op->StartLoc = S;
1969  Op->EndLoc = S;
1970  return Op;
1971  }
1972 
1973  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1974  StringRef Str,
1975  SMLoc S,
1976  MCContext &Ctx) {
1977  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1978  Op->PSBHint.Val = Val;
1979  Op->PSBHint.Data = Str.data();
1980  Op->PSBHint.Length = Str.size();
1981  Op->StartLoc = S;
1982  Op->EndLoc = S;
1983  return Op;
1984  }
1985 
1986  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1987  StringRef Str,
1988  SMLoc S,
1989  MCContext &Ctx) {
1990  auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1991  Op->BTIHint.Val = Val << 1 | 32;
1992  Op->BTIHint.Data = Str.data();
1993  Op->BTIHint.Length = Str.size();
1994  Op->StartLoc = S;
1995  Op->EndLoc = S;
1996  return Op;
1997  }
1998 
1999  static std::unique_ptr<AArch64Operand>
2000  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2001  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2002  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2003  Op->ShiftExtend.Type = ShOp;
2004  Op->ShiftExtend.Amount = Val;
2005  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2006  Op->StartLoc = S;
2007  Op->EndLoc = E;
2008  return Op;
2009  }
2010 };
2011 
2012 } // end anonymous namespace.
2013 
// Debug-print this parsed operand; the rendering is kind-specific.
2014 void AArch64Operand::print(raw_ostream &OS) const {
 2015  switch (Kind) {
 2016  case k_FPImm:
 2017  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
 2018  if (!getFPImmIsExact())
 2019  OS << " (inexact)";
 2020  OS << ">";
 2021  break;
 2022  case k_Barrier: {
 // Named barriers print their mnemonic; unnamed ones the raw value.
 2023  StringRef Name = getBarrierName();
 2024  if (!Name.empty())
 2025  OS << "<barrier " << Name << ">";
 2026  else
 2027  OS << "<barrier invalid #" << getBarrier() << ">";
 2028  break;
 2029  }
 2030  case k_Immediate:
 2031  OS << *getImm();
 2032  break;
 2033  case k_ShiftedImm: {
 2034  unsigned Shift = getShiftedImmShift();
 2035  OS << "<shiftedimm ";
 2036  OS << *getShiftedImmVal();
 2037  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
 2038  break;
 2039  }
 2040  case k_CondCode:
 2041  OS << "<condcode " << getCondCode() << ">";
 2042  break;
 2043  case k_VectorList: {
 // Prints raw register enum values starting at the list's first register.
 2044  OS << "<vectorlist ";
 2045  unsigned Reg = getVectorListStart();
 2046  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
 2047  OS << Reg + i << " ";
 2048  OS << ">";
 2049  break;
 2050  }
 2051  case k_VectorIndex:
 2052  OS << "<vectorindex " << getVectorIndex() << ">";
 2053  break;
 2054  case k_SysReg:
 2055  OS << "<sysreg: " << getSysReg() << '>';
 2056  break;
 2057  case k_Token:
 2058  OS << "'" << getToken() << "'";
 2059  break;
 2060  case k_SysCR:
 2061  OS << "c" << getSysCR();
 2062  break;
 2063  case k_Prefetch: {
 2064  StringRef Name = getPrefetchName();
 2065  if (!Name.empty())
 2066  OS << "<prfop " << Name << ">";
 2067  else
 2068  OS << "<prfop invalid #" << getPrefetch() << ">";
 2069  break;
 2070  }
 2071  case k_PSBHint:
 2072  OS << getPSBHintName();
 2073  break;
 2074  case k_Register:
 2075  OS << "<register " << getReg() << ">";
 2076  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
 2077  break;
 // NOTE(review): upstream has an explicit LLVM_FALLTHROUGH annotation here
 // (doxygen line 2078) that was lost in extraction: when the register
 // carries a shift/extend this case deliberately falls into the next one so
 // the shift/extend is printed as well. Confirm against upstream before
 // rebuilding from this listing.
 2079  case k_BTIHint:
 2080  OS << getBTIHintName();
 2081  break;
 2082  case k_ShiftExtend:
 2083  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
 2084  << getShiftExtendAmount();
 2085  if (!hasShiftExtendAmount())
 // "<imp>" marks an implicit (defaulted) amount.
 2086  OS << "<imp>";
 2087  OS << '>';
 2088  break;
 2089  }
 2090 }
2091 
2092 /// @name Auto-generated Match Functions
2093 /// {
2094 
2095 static unsigned MatchRegisterName(StringRef Name);
2096 
2097 /// }
2098 
// Map a NEON vector register spelling "v0".."v31" (case-insensitive) to the
// corresponding Q-register enum value; returns 0 for anything else. Kept as
// an explicit table because the generated register enum order is not
// guaranteed to be numerically contiguous.
2099 static unsigned MatchNeonVectorRegName(StringRef Name) {
 2100  return StringSwitch<unsigned>(Name.lower())
 2101  .Case("v0", AArch64::Q0)
 2102  .Case("v1", AArch64::Q1)
 2103  .Case("v2", AArch64::Q2)
 2104  .Case("v3", AArch64::Q3)
 2105  .Case("v4", AArch64::Q4)
 2106  .Case("v5", AArch64::Q5)
 2107  .Case("v6", AArch64::Q6)
 2108  .Case("v7", AArch64::Q7)
 2109  .Case("v8", AArch64::Q8)
 2110  .Case("v9", AArch64::Q9)
 2111  .Case("v10", AArch64::Q10)
 2112  .Case("v11", AArch64::Q11)
 2113  .Case("v12", AArch64::Q12)
 2114  .Case("v13", AArch64::Q13)
 2115  .Case("v14", AArch64::Q14)
 2116  .Case("v15", AArch64::Q15)
 2117  .Case("v16", AArch64::Q16)
 2118  .Case("v17", AArch64::Q17)
 2119  .Case("v18", AArch64::Q18)
 2120  .Case("v19", AArch64::Q19)
 2121  .Case("v20", AArch64::Q20)
 2122  .Case("v21", AArch64::Q21)
 2123  .Case("v22", AArch64::Q22)
 2124  .Case("v23", AArch64::Q23)
 2125  .Case("v24", AArch64::Q24)
 2126  .Case("v25", AArch64::Q25)
 2127  .Case("v26", AArch64::Q26)
 2128  .Case("v27", AArch64::Q27)
 2129  .Case("v28", AArch64::Q28)
 2130  .Case("v29", AArch64::Q29)
 2131  .Case("v30", AArch64::Q30)
 2132  .Case("v31", AArch64::Q31)
 2133  .Default(0)
 2134 }
2135 
2136 /// Returns an optional pair of (#elements, element-width) if Suffix
2137 /// is a valid vector kind. Where the number of elements in a vector
2138 /// or the vector width is implicit or explicitly unknown (but still a
2139 /// valid suffix kind), 0 is used.
2140 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2141  RegKind VectorKind) {
2142  std::pair<int, int> Res = {-1, -1};
2143 
2144  switch (VectorKind) {
2145  case RegKind::NeonVector:
2146  Res =
2147  StringSwitch<std::pair<int, int>>(Suffix.lower())
2148  .Case("", {0, 0})
2149  .Case(".1d", {1, 64})
2150  .Case(".1q", {1, 128})
2151  // '.2h' needed for fp16 scalar pairwise reductions
2152  .Case(".2h", {2, 16})
2153  .Case(".2s", {2, 32})
2154  .Case(".2d", {2, 64})
2155  // '.4b' is another special case for the ARMv8.2a dot product
2156  // operand
2157  .Case(".4b", {4, 8})
2158  .Case(".4h", {4, 16})
2159  .Case(".4s", {4, 32})
2160  .Case(".8b", {8, 8})
2161  .Case(".8h", {8, 16})
2162  .Case(".16b", {16, 8})
2163  // Accept the width neutral ones, too, for verbose syntax. If those
2164  // aren't used in the right places, the token operand won't match so
2165  // all will work out.
2166  .Case(".b", {0, 8})
2167  .Case(".h", {0, 16})
2168  .Case(".s", {0, 32})
2169  .Case(".d", {0, 64})
2170  .Default({-1, -1});
2171  break;
2172  case RegKind::SVEPredicateVector:
2173  case RegKind::SVEDataVector:
2174  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2175  .Case("", {0, 0})
2176  .Case(".b", {0, 8})
2177  .Case(".h", {0, 16})
2178  .Case(".s", {0, 32})
2179  .Case(".d", {0, 64})
2180  .Case(".q", {0, 128})
2181  .Default({-1, -1});
2182  break;
2183  default:
2184  llvm_unreachable("Unsupported RegKind");
2185  }
2186 
2187  if (Res == std::make_pair(-1, -1))
2188  return Optional<std::pair<int, int>>();
2189 
2190  return Optional<std::pair<int, int>>(Res);
2191 }
2192 
2193 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2194  return parseVectorKind(Suffix, VectorKind).hasValue();
2195 }
2196 
// Map an SVE data vector spelling "z0".."z31" (case-insensitive) to the
// corresponding Z-register enum value; returns 0 for anything else.
2197 static unsigned matchSVEDataVectorRegName(StringRef Name) {
 2198  return StringSwitch<unsigned>(Name.lower())
 2199  .Case("z0", AArch64::Z0)
 2200  .Case("z1", AArch64::Z1)
 2201  .Case("z2", AArch64::Z2)
 2202  .Case("z3", AArch64::Z3)
 2203  .Case("z4", AArch64::Z4)
 2204  .Case("z5", AArch64::Z5)
 2205  .Case("z6", AArch64::Z6)
 2206  .Case("z7", AArch64::Z7)
 2207  .Case("z8", AArch64::Z8)
 2208  .Case("z9", AArch64::Z9)
 2209  .Case("z10", AArch64::Z10)
 2210  .Case("z11", AArch64::Z11)
 2211  .Case("z12", AArch64::Z12)
 2212  .Case("z13", AArch64::Z13)
 2213  .Case("z14", AArch64::Z14)
 2214  .Case("z15", AArch64::Z15)
 2215  .Case("z16", AArch64::Z16)
 2216  .Case("z17", AArch64::Z17)
 2217  .Case("z18", AArch64::Z18)
 2218  .Case("z19", AArch64::Z19)
 2219  .Case("z20", AArch64::Z20)
 2220  .Case("z21", AArch64::Z21)
 2221  .Case("z22", AArch64::Z22)
 2222  .Case("z23", AArch64::Z23)
 2223  .Case("z24", AArch64::Z24)
 2224  .Case("z25", AArch64::Z25)
 2225  .Case("z26", AArch64::Z26)
 2226  .Case("z27", AArch64::Z27)
 2227  .Case("z28", AArch64::Z28)
 2228  .Case("z29", AArch64::Z29)
 2229  .Case("z30", AArch64::Z30)
 2230  .Case("z31", AArch64::Z31)
 2231  .Default(0)
 2232 }
2233 
// Map an SVE predicate register spelling "p0".."p15" (case-insensitive) to
// the corresponding P-register enum value; returns 0 for anything else.
2234 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
 2235  return StringSwitch<unsigned>(Name.lower())
 2236  .Case("p0", AArch64::P0)
 2237  .Case("p1", AArch64::P1)
 2238  .Case("p2", AArch64::P2)
 2239  .Case("p3", AArch64::P3)
 2240  .Case("p4", AArch64::P4)
 2241  .Case("p5", AArch64::P5)
 2242  .Case("p6", AArch64::P6)
 2243  .Case("p7", AArch64::P7)
 2244  .Case("p8", AArch64::P8)
 2245  .Case("p9", AArch64::P9)
 2246  .Case("p10", AArch64::P10)
 2247  .Case("p11", AArch64::P11)
 2248  .Case("p12", AArch64::P12)
 2249  .Case("p13", AArch64::P13)
 2250  .Case("p14", AArch64::P14)
 2251  .Case("p15", AArch64::P15)
 2252  .Default(0)
 2253 }
2254 
2255 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2256  SMLoc &EndLoc) {
2257  StartLoc = getLoc();
2258  auto Res = tryParseScalarRegister(RegNo);
2259  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2260  return Res != MatchOperand_Success;
2261 }
2262 
2263 // Matches a register name or register alias previously defined by '.req'
2264 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2265  RegKind Kind) {
2266  unsigned RegNum = 0;
2267  if ((RegNum = matchSVEDataVectorRegName(Name)))
2268  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2269 
2270  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2271  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2272 
2273  if ((RegNum = MatchNeonVectorRegName(Name)))
2274  return Kind == RegKind::NeonVector ? RegNum : 0;
2275 
2276  // The parsed register must be of RegKind Scalar
2277  if ((RegNum = MatchRegisterName(Name)))
2278  return Kind == RegKind::Scalar ? RegNum : 0;
2279 
2280  if (!RegNum) {
2281  // Handle a few common aliases of registers.
2282  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2283  .Case("fp", AArch64::FP)
2284  .Case("lr", AArch64::LR)
2285  .Case("x31", AArch64::XZR)
2286  .Case("w31", AArch64::WZR)
2287  .Default(0))
2288  return Kind == RegKind::Scalar ? RegNum : 0;
2289 
2290  // Check for aliases registered via .req. Canonicalize to lower case.
2291  // That's more consistent since register names are case insensitive, and
2292  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2293  auto Entry = RegisterReqs.find(Name.lower());
2294  if (Entry == RegisterReqs.end())
2295  return 0;
2296 
2297  // set RegNum if the match is the right kind of register
2298  if (Kind == Entry->getValue().first)
2299  RegNum = Entry->getValue().second;
2300  }
2301  return RegNum;
2302 }
2303 
2304 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2305 /// Identifier when called, and if it is a register name the token is eaten and
2306 /// the register is added to the operand list.
// NOTE(review): doxygen line 2307 — the function's return type, presumably
// OperandMatchResultTy like the other tryParse* parsers here (it returns
// MatchOperand_* values) — was lost in extraction; restore from upstream.
2308 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
 2309  MCAsmParser &Parser = getParser();
 2310  const AsmToken &Tok = Parser.getTok();
 2311  if (Tok.isNot(AsmToken::Identifier))
 2312  return MatchOperand_NoMatch;
 2313 
 // Register names are case-insensitive; match on the lower-cased spelling.
 2314  std::string lowerCase = Tok.getString().lower();
 2315  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
 2316  if (Reg == 0)
 2317  return MatchOperand_NoMatch;
 2318 
 2319  RegNum = Reg;
 2320  Parser.Lex(); // Eat identifier token.
 2321  return MatchOperand_Success;
 2322 }
2323 
2324 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// NOTE(review): doxygen line 2325 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2326 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
 2327  MCAsmParser &Parser = getParser();
 2328  SMLoc S = getLoc();
 2329 
 2330  if (Parser.getTok().isNot(AsmToken::Identifier)) {
 2331  Error(S, "Expected cN operand where 0 <= N <= 15");
 2332  return MatchOperand_ParseFail;
 2333  }
 2334 
 // The spelling must be 'c' or 'C' followed by a decimal number 0..15.
 2335  StringRef Tok = Parser.getTok().getIdentifier();
 2336  if (Tok[0] != 'c' && Tok[0] != 'C') {
 2337  Error(S, "Expected cN operand where 0 <= N <= 15");
 2338  return MatchOperand_ParseFail;
 2339  }
 2340 
 2341  uint32_t CRNum;
 2342  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
 2343  if (BadNum || CRNum > 15) {
 2344  Error(S, "Expected cN operand where 0 <= N <= 15");
 2345  return MatchOperand_ParseFail;
 2346  }
 2347 
 2348  Parser.Lex(); // Eat identifier token.
 2349  Operands.push_back(
 2350  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
 2351  return MatchOperand_Success;
 2352 }
2353 
2354 /// tryParsePrefetch - Try to parse a prefetch operand.
2355 template <bool IsSVEPrefetch>
// NOTE(review): doxygen line 2356 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2357 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
 2358  MCAsmParser &Parser = getParser();
 2359  SMLoc S = getLoc();
 2360  const AsmToken &Tok = Parser.getTok();
 2361 
 // The SVE and base prefetch operands use different name/encoding tables;
 // select the table at compile time via the template parameter.
 2362  auto LookupByName = [](StringRef N) {
 2363  if (IsSVEPrefetch) {
 2364  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
 2365  return Optional<unsigned>(Res->Encoding);
 2366  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
 2367  return Optional<unsigned>(Res->Encoding);
 2368  return Optional<unsigned>();
 2369  };
 2370 
 2371  auto LookupByEncoding = [](unsigned E) {
 2372  if (IsSVEPrefetch) {
 2373  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
 2374  return Optional<StringRef>(Res->Name);
 2375  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
 2376  return Optional<StringRef>(Res->Name);
 2377  return Optional<StringRef>();
 2378  };
 // SVE prfops are 4-bit (0..15); base prefetch operands are 5-bit (0..31).
 2379  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
 2380 
 2381  // Either an identifier for named values or a 5-bit immediate.
 2382  // Eat optional hash.
 2383  if (parseOptionalToken(AsmToken::Hash) ||
 2384  Tok.is(AsmToken::Integer)) {
 2385  const MCExpr *ImmVal;
 2386  if (getParser().parseExpression(ImmVal))
 2387  return MatchOperand_ParseFail;
 2388 
 2389  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
 2390  if (!MCE) {
 2391  TokError("immediate value expected for prefetch operand");
 2392  return MatchOperand_ParseFail;
 2393  }
 2394  unsigned prfop = MCE->getValue();
 2395  if (prfop > MaxVal) {
 2396  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
 2397  "] expected");
 2398  return MatchOperand_ParseFail;
 2399  }
 2400 
 // Attach the canonical name when the raw encoding has one, for printing.
 2401  auto PRFM = LookupByEncoding(MCE->getValue());
 2402  Operands.push_back(AArch64Operand::CreatePrefetch(
 2403  prfop, PRFM.getValueOr(""), S, getContext()));
 2404  return MatchOperand_Success;
 2405  }
 2406 
 2407  if (Tok.isNot(AsmToken::Identifier)) {
 2408  TokError("prefetch hint expected");
 2409  return MatchOperand_ParseFail;
 2410  }
 2411 
 2412  auto PRFM = LookupByName(Tok.getString());
 2413  if (!PRFM) {
 2414  TokError("prefetch hint expected");
 2415  return MatchOperand_ParseFail;
 2416  }
 2417 
 2418  Parser.Lex(); // Eat identifier token.
 2419  Operands.push_back(AArch64Operand::CreatePrefetch(
 2420  *PRFM, Tok.getString(), S, getContext()));
 2421  return MatchOperand_Success;
 2422 }
2423 
2424 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
// NOTE(review): doxygen line 2425 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2426 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
 2427  MCAsmParser &Parser = getParser();
 2428  SMLoc S = getLoc();
 2429  const AsmToken &Tok = Parser.getTok();
 2430  if (Tok.isNot(AsmToken::Identifier)) {
 2431  TokError("invalid operand for instruction");
 2432  return MatchOperand_ParseFail;
 2433  }
 2434 
 // Only named PSB hints are accepted; there is no raw-immediate form here.
 2435  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
 2436  if (!PSB) {
 2437  TokError("invalid operand for instruction");
 2438  return MatchOperand_ParseFail;
 2439  }
 2440 
 2441  Parser.Lex(); // Eat identifier token.
 2442  Operands.push_back(AArch64Operand::CreatePSBHint(
 2443  PSB->Encoding, Tok.getString(), S, getContext()));
 2444  return MatchOperand_Success;
 2445 }
2446 
2447 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
// NOTE(review): doxygen line 2448 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2449 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
 2450  MCAsmParser &Parser = getParser();
 2451  SMLoc S = getLoc();
 2452  const AsmToken &Tok = Parser.getTok();
 2453  if (Tok.isNot(AsmToken::Identifier)) {
 2454  TokError("invalid operand for instruction");
 2455  return MatchOperand_ParseFail;
 2456  }
 2457 
 // Only named BTI targets (c/j/jc) are accepted.
 2458  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
 2459  if (!BTI) {
 2460  TokError("invalid operand for instruction");
 2461  return MatchOperand_ParseFail;
 2462  }
 2463 
 2464  Parser.Lex(); // Eat identifier token.
 2465  Operands.push_back(AArch64Operand::CreateBTIHint(
 2466  BTI->Encoding, Tok.getString(), S, getContext()));
 2467  return MatchOperand_Success;
 2468 }
2469 
2470 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2471 /// instruction.
// NOTE(review): doxygen line 2472 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2473 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
 2474  MCAsmParser &Parser = getParser();
 2475  SMLoc S = getLoc();
 2476  const MCExpr *Expr;
 2477 
 2478  if (Parser.getTok().is(AsmToken::Hash)) {
 2479  Parser.Lex(); // Eat hash token.
 2480  }
 2481 
 2482  if (parseSymbolicImmVal(Expr))
 2483  return MatchOperand_ParseFail;
 2484 
 2485  AArch64MCExpr::VariantKind ELFRefKind;
 2486  MCSymbolRefExpr::VariantKind DarwinRefKind;
 2487  int64_t Addend;
 2488  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
 2489  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
 2490  ELFRefKind == AArch64MCExpr::VK_INVALID) {
 2491  // No modifier was specified at all; this is the syntax for an ELF basic
 2492  // ADRP relocation (unfortunately).
 2493  Expr =
 // NOTE(review): doxygen line 2494 — the right-hand side of this
 // assignment, presumably AArch64MCExpr::create(Expr, <a page-relative
 // variant>, getContext()) — was lost in extraction; restore from
 // upstream.
 2495  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
 2496  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
 2497  Addend != 0) {
 2498  Error(S, "gotpage label reference not allowed an addend");
 2499  return MatchOperand_ParseFail;
 2500  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
 2501  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
 2502  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
 2503  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
 2504  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
 2505  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
 2506  // The operand must be an @page or @gotpage qualified symbolref.
 2507  Error(S, "page or gotpage label reference expected")
 2508  return MatchOperand_ParseFail;
 2509  }
 2510  }
 2511 
 2512  // We have either a label reference possibly with addend or an immediate. The
 2513  // addend is a raw value here. The linker will adjust it to only reference the
 2514  // page.
 2515  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
 2516  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
 2517 
 2518  return MatchOperand_Success;
 2519 }
2520 
2521 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2522 /// instruction.
// NOTE(review): doxygen line 2523 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2524 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
 2525  SMLoc S = getLoc();
 2526  const MCExpr *Expr;
 2527 
 2528  // Leave anything with a bracket to the default for SVE
 2529  if (getParser().getTok().is(AsmToken::LBrac))
 2530  return MatchOperand_NoMatch;
 2531 
 2532  if (getParser().getTok().is(AsmToken::Hash))
 2533  getParser().Lex(); // Eat hash token.
 2534 
 2535  if (parseSymbolicImmVal(Expr))
 2536  return MatchOperand_ParseFail;
 2537 
 2538  AArch64MCExpr::VariantKind ELFRefKind;
 2539  MCSymbolRefExpr::VariantKind DarwinRefKind;
 2540  int64_t Addend;
 2541  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
 2542  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
 2543  ELFRefKind == AArch64MCExpr::VK_INVALID) {
 2544  // No modifier was specified at all; this is the syntax for an ELF basic
 2545  // ADR relocation (unfortunately).
 2546  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
 2547  } else {
 // Any explicit modifier is rejected for ADR.
 2548  Error(S, "unexpected adr label");
 2549  return MatchOperand_ParseFail;
 2550  }
 2551  }
 2552 
 2553  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
 2554  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
 2555  return MatchOperand_Success;
 2556 }
2557 
2558 /// tryParseFPImm - A floating point immediate expression operand.
2559 template<bool AddFPZeroAsLiteral>
// NOTE(review): doxygen line 2560 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2561 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
 2562  MCAsmParser &Parser = getParser();
 2563  SMLoc S = getLoc();
 2564 
 2565  bool Hash = parseOptionalToken(AsmToken::Hash);
 2566 
 2567  // Handle negation, as that still comes through as a separate token.
 2568  bool isNegative = parseOptionalToken(AsmToken::Minus);
 2569 
 2570  const AsmToken &Tok = Parser.getTok();
 2571  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
 // Without a leading '#' a non-numeric token is simply "not this operand";
 // with one, it is a hard parse error.
 2572  if (!Hash)
 2573  return MatchOperand_NoMatch;
 2574  TokError("invalid floating point immediate");
 2575  return MatchOperand_ParseFail;
 2576  }
 2577 
 2578  // Parse hexadecimal representation.
 2579  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
 // The hex form is the raw 8-bit FPImm encoding; negation is meaningless.
 2580  if (Tok.getIntVal() > 255 || isNegative) {
 2581  TokError("encoded floating point value out of range");
 2582  return MatchOperand_ParseFail;
 2583  }
 2584 
 2585  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
 2586  Operands.push_back(
 2587  AArch64Operand::CreateFPImm(F, true, S, getContext()));
 2588  } else {
 2589  // Parse FP representation.
 2590  APFloat RealVal(APFloat::IEEEdouble());
 2591  auto Status =
 // NOTE(review): doxygen line 2592 — the right-hand side of this
 // assignment, presumably RealVal.convertFromString(Tok.getString(), ...)
 // — was lost in extraction; restore from upstream.
 2593  if (isNegative)
 2594  RealVal.changeSign();
 2595 
 2596  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
 // Some instructions spell +0.0 as the literal token "#0.0" rather than
 // an FP immediate operand; emit it as two tokens.
 2597  Operands.push_back(
 2598  AArch64Operand::CreateToken("#0", false, S, getContext()));
 2599  Operands.push_back(
 2600  AArch64Operand::CreateToken(".0", false, S, getContext()));
 2601  } else
 2602  Operands.push_back(AArch64Operand::CreateFPImm(
 2603  RealVal, Status == APFloat::opOK, S, getContext()));
 2604  }
 2605 
 2606  Parser.Lex(); // Eat the token.
 2607 
 2608  return MatchOperand_Success;
 2609 }
2610 
2611 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2612 /// a shift suffix, for example '#1, lsl #12'.
// NOTE(review): doxygen line 2613 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2614 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
 2615  MCAsmParser &Parser = getParser();
 2616  SMLoc S = getLoc();
 2617 
 2618  if (Parser.getTok().is(AsmToken::Hash))
 2619  Parser.Lex(); // Eat '#'
 2620  else if (Parser.getTok().isNot(AsmToken::Integer))
 2621  // Operand should start from # or should be integer, emit error otherwise.
 2622  return MatchOperand_NoMatch;
 2623 
 2624  const MCExpr *Imm;
 2625  if (parseSymbolicImmVal(Imm))
 2626  return MatchOperand_ParseFail;
 2627  else if (Parser.getTok().isNot(AsmToken::Comma)) {
 // No shift suffix follows: emit a plain immediate.
 2628  SMLoc E = Parser.getTok().getLoc();
 2629  Operands.push_back(
 2630  AArch64Operand::CreateImm(Imm, S, E, getContext()));
 2631  return MatchOperand_Success;
 2632  }
 2633 
 2634  // Eat ','
 2635  Parser.Lex();
 2636 
 2637  // The optional operand must be "lsl #N" where N is non-negative.
 2638  if (!Parser.getTok().is(AsmToken::Identifier) ||
 2639  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
 2640  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
 2641  return MatchOperand_ParseFail;
 2642  }
 2643 
 2644  // Eat 'lsl'
 2645  Parser.Lex();
 2646 
 2647  parseOptionalToken(AsmToken::Hash);
 2648 
 2649  if (Parser.getTok().isNot(AsmToken::Integer)) {
 2650  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
 2651  return MatchOperand_ParseFail;
 2652  }
 2653 
 2654  int64_t ShiftAmount = Parser.getTok().getIntVal();
 2655 
 2656  if (ShiftAmount < 0) {
 2657  Error(Parser.getTok().getLoc(), "positive shift amount required");
 2658  return MatchOperand_ParseFail;
 2659  }
 2660  Parser.Lex(); // Eat the number
 2661 
 2662  // Just in case the optional lsl #0 is used for immediates other than zero.
 // NOTE(review): `Imm != 0` compares the MCExpr POINTER against null (it is
 // always non-null after a successful parseSymbolicImmVal), not the
 // immediate's value; spelling it `Imm != nullptr` would make that intent
 // explicit. Confirm against upstream before changing.
 2663  if (ShiftAmount == 0 && Imm != 0) {
 2664  SMLoc E = Parser.getTok().getLoc();
 2665  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
 2666  return MatchOperand_Success;
 2667  }
 2668 
 2669  SMLoc E = Parser.getTok().getLoc();
 2670  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
 2671  S, E, getContext()));
 2672  return MatchOperand_Success;
 2673 }
2674 
2675 /// parseCondCodeString - Parse a Condition Code string.
2676 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
 // NOTE(review): doxygen line 2677 — the start of this expression, presumably
 // `AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())`
 // — was lost in extraction; restore from upstream.
 2678  .Case("eq", AArch64CC::EQ)
 2679  .Case("ne", AArch64CC::NE)
 2680  .Case("cs", AArch64CC::HS)
 2681  .Case("hs", AArch64CC::HS)
 2682  .Case("cc", AArch64CC::LO)
 2683  .Case("lo", AArch64CC::LO)
 2684  .Case("mi", AArch64CC::MI)
 2685  .Case("pl", AArch64CC::PL)
 2686  .Case("vs", AArch64CC::VS)
 2687  .Case("vc", AArch64CC::VC)
 2688  .Case("hi", AArch64CC::HI)
 2689  .Case("ls", AArch64CC::LS)
 2690  .Case("ge", AArch64CC::GE)
 2691  .Case("lt", AArch64CC::LT)
 2692  .Case("gt", AArch64CC::GT)
 2693  .Case("le", AArch64CC::LE)
 2694  .Case("al", AArch64CC::AL)
 2695  .Case("nv", AArch64CC::NV)
 // NOTE(review): doxygen line 2696, presumably `.Default(AArch64CC::Invalid);`,
 // was lost in extraction.
 2697 
 // SVE accepts alias spellings for the condition codes (e.g. "none" for EQ)
 // when the SVE feature is enabled.
 2698  if (CC == AArch64CC::Invalid &&
 2699  getSTI().getFeatureBits()[AArch64::FeatureSVE])
 // NOTE(review): doxygen line 2700, presumably
 // `CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())`, was lost in
 // extraction.
 2701  .Case("none", AArch64CC::EQ)
 2702  .Case("any", AArch64CC::NE)
 2703  .Case("nlast", AArch64CC::HS)
 2704  .Case("last", AArch64CC::LO)
 2705  .Case("first", AArch64CC::MI)
 2706  .Case("nfrst", AArch64CC::PL)
 2707  .Case("pmore", AArch64CC::HI)
 2708  .Case("plast", AArch64CC::LS)
 2709  .Case("tcont", AArch64CC::GE)
 2710  .Case("tstop", AArch64CC::LT)
 // NOTE(review): doxygen line 2711, presumably `.Default(AArch64CC::Invalid);`,
 // was lost in extraction.
 2712 
 2713  return CC;
 2714 }
2715 
2716 /// parseCondCode - Parse a Condition Code operand.
2717 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
 2718  bool invertCondCode) {
 2719  MCAsmParser &Parser = getParser();
 2720  SMLoc S = getLoc();
 2721  const AsmToken &Tok = Parser.getTok();
 2722  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
 2723 
 2724  StringRef Cond = Tok.getString();
 2725  AArch64CC::CondCode CC = parseCondCodeString(Cond);
 2726  if (CC == AArch64CC::Invalid)
 2727  return TokError("invalid condition code");
 2728  Parser.Lex(); // Eat identifier token.
 2729 
 // Some instructions encode the logically inverted condition; AL/NV have no
 // meaningful inverse and are rejected for those.
 2730  if (invertCondCode) {
 2731  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
 2732  return TokError("condition codes AL and NV are invalid for this instruction");
 // NOTE(review): doxygen line 2733 — presumably
 // `CC = AArch64CC::getInvertedCondCode(CC);` — was lost in extraction;
 // restore from upstream.
 2734  }
 2735 
 2736  Operands.push_back(
 2737  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
 2738  return false;
 2739 }
2740 
2741 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2742 /// them if present.
// NOTE(review): doxygen line 2743 — the function's return type, presumably
// OperandMatchResultTy — was lost in extraction; restore from upstream.
2744 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
 2745  MCAsmParser &Parser = getParser();
 2746  const AsmToken &Tok = Parser.getTok();
 2747  std::string LowerID = Tok.getString().lower();
 // NOTE(review): doxygen lines 2748-2749 — the start of this expression,
 // presumably `AArch64_AM::ShiftExtendType ShOp =
 // StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)` — were lost in
 // extraction; restore from upstream.
 2750  .Case("lsl", AArch64_AM::LSL)
 2751  .Case("lsr", AArch64_AM::LSR)
 2752  .Case("asr", AArch64_AM::ASR)
 2753  .Case("ror", AArch64_AM::ROR)
 2754  .Case("msl", AArch64_AM::MSL)
 2755  .Case("uxtb", AArch64_AM::UXTB)
 2756  .Case("uxth", AArch64_AM::UXTH)
 2757  .Case("uxtw", AArch64_AM::UXTW)
 2758  .Case("uxtx", AArch64_AM::UXTX)
 2759  .Case("sxtb", AArch64_AM::SXTB)
 2760  .Case("sxth", AArch64_AM::SXTH)
 2761  .Case("sxtw", AArch64_AM::SXTW)
 2762  .Case("sxtx", AArch64_AM::SXTX)
 // NOTE(review): doxygen line 2763, presumably
 // `.Default(AArch64_AM::InvalidShiftExtend);`, was lost in extraction.
 2764 
 2765  if (ShOp == AArch64_AM::InvalidShiftExtend)
 2766  return MatchOperand_NoMatch;
 2767 
 2768  SMLoc S = Tok.getLoc();
 2769  Parser.Lex();
 2770 
 2771  bool Hash = parseOptionalToken(AsmToken::Hash);
 2772 
 2773  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
 // True shifts require an explicit amount ...
 2774  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
 2775  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
 2776  ShOp == AArch64_AM::MSL) {
 2777  // We expect a number here.
 2778  TokError("expected #imm after shift specifier");
 2779  return MatchOperand_ParseFail;
 2780  }
 2781 
 2782  // "extend" type operations don't need an immediate, #0 is implicit.
 2783  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
 2784  Operands.push_back(
 2785  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
 2786  return MatchOperand_Success;
 2787  }
 2788 
 2789  // Make sure we do actually have a number, identifier or a parenthesized
 2790  // expression.
 2791  SMLoc E = Parser.getTok().getLoc();
 2792  if (!Parser.getTok().is(AsmToken::Integer) &&
 2793  !Parser.getTok().is(AsmToken::LParen) &&
 2794  !Parser.getTok().is(AsmToken::Identifier)) {
 2795  Error(E, "expected integer shift amount");
 2796  return MatchOperand_ParseFail;
 2797  }
 2798 
 2799  const MCExpr *ImmVal;
 2800  if (getParser().parseExpression(ImmVal))
 2801  return MatchOperand_ParseFail;
 2802 
 // The amount must fold to a constant at parse time.
 2803  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
 2804  if (!MCE) {
 2805  Error(E, "expected constant '#imm' after shift specifier");
 2806  return MatchOperand_ParseFail;
 2807  }
 2808 
 2809  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
 2810  Operands.push_back(AArch64Operand::CreateShiftExtend(
 2811  ShOp, MCE->getValue(), true, S, E, getContext()));
 2812  return MatchOperand_Success;
 2813 }
2814 
// Table mapping ".arch_extension"-style names to their subtarget features.
2815 static const struct Extension {
 2816  const char *Name;
 // NOTE(review): doxygen line 2817 — the second member, presumably
 // `const FeatureBitset Features;` — was lost in extraction; restore from
 // upstream.
 2818 } ExtensionMap[] = {
 2819  {"crc", {AArch64::FeatureCRC}},
 2820  {"sm4", {AArch64::FeatureSM4}},
 2821  {"sha3", {AArch64::FeatureSHA3}},
 2822  {"sha2", {AArch64::FeatureSHA2}},
 2823  {"aes", {AArch64::FeatureAES}},
 2824  {"crypto", {AArch64::FeatureCrypto}},
 2825  {"fp", {AArch64::FeatureFPARMv8}},
 2826  {"simd", {AArch64::FeatureNEON}},
 2827  {"ras", {AArch64::FeatureRAS}},
 2828  {"lse", {AArch64::FeatureLSE}},
 2829  {"predres", {AArch64::FeaturePredRes}},
 2830  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
 2831  {"mte", {AArch64::FeatureMTE}},
 2832  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
 2833  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
 2834  {"ccpp", {AArch64::FeatureCCPP}},
 2835  {"sve", {AArch64::FeatureSVE}},
 2836  // FIXME: Unsupported extensions
 2837  {"pan", {}},
 2838  {"lor", {}},
 2839  {"rdma", {}},
 2840  {"profile", {}},
 2841 };
2842 
// Append a human-readable name for (one of) the features in FBS to Str, used
// to build "X requires <feature>" diagnostics. Architecture versions are
// preferred; otherwise the first matching entry of ExtensionMap is used.
2843 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
 2844  if (FBS[AArch64::HasV8_1aOps])
 2845  Str += "ARMv8.1a";
 2846  else if (FBS[AArch64::HasV8_2aOps])
 2847  Str += "ARMv8.2a";
 2848  else if (FBS[AArch64::HasV8_3aOps])
 2849  Str += "ARMv8.3a";
 2850  else if (FBS[AArch64::HasV8_4aOps])
 2851  Str += "ARMv8.4a";
 2852  else if (FBS[AArch64::HasV8_5aOps])
 2853  Str += "ARMv8.5a";
 2854  else {
 2855  auto ext = std::find_if(std::begin(ExtensionMap),
 // NOTE(review): doxygen line 2856 — the second argument, presumably
 // `std::end(ExtensionMap),` — was lost in extraction; restore from
 // upstream.
 2857  [&](const Extension& e)
 2858  // Use & in case multiple features are enabled
 2859  { return (FBS & e.Features) != FeatureBitset(); }
 2860  );
 2861 
 2862  Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
 2863  }
 2864 }
2865 
2866 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2867  SMLoc S) {
2868  const uint16_t Op2 = Encoding & 7;
2869  const uint16_t Cm = (Encoding & 0x78) >> 3;
2870  const uint16_t Cn = (Encoding & 0x780) >> 7;
2871  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2872 
2873  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2874 
2875  Operands.push_back(
2876  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2877  Operands.push_back(
2878  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2879  Operands.push_back(
2880  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2881  Expr = MCConstantExpr::create(Op2, getContext());
2882  Operands.push_back(
2883  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2884 }
2885 
2886 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2887 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2888 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2889  OperandVector &Operands) {
2890  if (Name.find('.') != StringRef::npos)
2891  return TokError("invalid operand");
2892 
2893  Mnemonic = Name;
2894  Operands.push_back(
2895  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2896 
2897  MCAsmParser &Parser = getParser();
2898  const AsmToken &Tok = Parser.getTok();
2899  StringRef Op = Tok.getString();
2900  SMLoc S = Tok.getLoc();
2901 
2902  if (Mnemonic == "ic") {
2903  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2904  if (!IC)
2905  return TokError("invalid operand for IC instruction");
2906  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2907  std::string Str("IC " + std::string(IC->Name) + " requires ");
2909  return TokError(Str.c_str());
2910  }
2911  createSysAlias(IC->Encoding, Operands, S);
2912  } else if (Mnemonic == "dc") {
2913  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2914  if (!DC)
2915  return TokError("invalid operand for DC instruction");
2916  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2917  std::string Str("DC " + std::string(DC->Name) + " requires ");
2919  return TokError(Str.c_str());
2920  }
2921  createSysAlias(DC->Encoding, Operands, S);
2922  } else if (Mnemonic == "at") {
2923  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2924  if (!AT)
2925  return TokError("invalid operand for AT instruction");
2926  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2927  std::string Str("AT " + std::string(AT->Name) + " requires ");
2929  return TokError(Str.c_str());
2930  }
2931  createSysAlias(AT->Encoding, Operands, S);
2932  } else if (Mnemonic == "tlbi") {
2933  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2934  if (!TLBI)
2935  return TokError("invalid operand for TLBI instruction");
2936  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2937  std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2939  return TokError(Str.c_str());
2940  }
2941  createSysAlias(TLBI->Encoding, Operands, S);
2942  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2943  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2944  if (!PRCTX)
2945  return TokError("invalid operand for prediction restriction instruction");
2946  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2947  std::string Str(
2948  Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2950  return TokError(Str.c_str());
2951  }
2952  uint16_t PRCTX_Op2 =
2953  Mnemonic == "cfp" ? 4 :
2954  Mnemonic == "dvp" ? 5 :
2955  Mnemonic == "cpp" ? 7 :
2956  0;
2957  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
2958  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
2959  }
2960 
2961  Parser.Lex(); // Eat operand.
2962 
2963  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2964  bool HasRegister = false;
2965 
2966  // Check for the optional register operand.
2967  if (parseOptionalToken(AsmToken::Comma)) {
2968  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2969  return TokError("expected register operand");
2970  HasRegister = true;
2971  }
2972 
2973  if (ExpectRegister && !HasRegister)
2974  return TokError("specified " + Mnemonic + " op requires a register");
2975  else if (!ExpectRegister && HasRegister)
2976  return TokError("specified " + Mnemonic + " op does not use a register");
2977 
2978  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2979  return true;
2980 
2981  return false;
2982 }
2983 
2985 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2986  MCAsmParser &Parser = getParser();
2987  const AsmToken &Tok = Parser.getTok();
2988 
2989  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2990  TokError("'csync' operand expected");
2991  return MatchOperand_ParseFail;
2992  // Can be either a #imm style literal or an option name
2993  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2994  // Immediate operand.
2995  const MCExpr *ImmVal;
2996  SMLoc ExprLoc = getLoc();
2997  if (getParser().parseExpression(ImmVal))
2998  return MatchOperand_ParseFail;
2999  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3000  if (!MCE) {
3001  Error(ExprLoc, "immediate value expected for barrier operand");
3002  return MatchOperand_ParseFail;
3003  }
3004  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
3005  Error(ExprLoc, "barrier operand out of range");
3006  return MatchOperand_ParseFail;
3007  }
3008  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3009  Operands.push_back(AArch64Operand::CreateBarrier(
3010  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3011  return MatchOperand_Success;
3012  }
3013 
3014  if (Tok.isNot(AsmToken::Identifier)) {
3015  TokError("invalid operand for instruction");
3016  return MatchOperand_ParseFail;
3017  }
3018 
3019  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3020  // The only valid named option for ISB is 'sy'
3021  auto DB = AArch64DB::lookupDBByName(Tok.getString());
3022  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3023  TokError("'sy' or #imm operand expected");
3024  return MatchOperand_ParseFail;
3025  // The only valid named option for TSB is 'csync'
3026  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3027  TokError("'csync' operand expected");
3028  return MatchOperand_ParseFail;
3029  } else if (!DB && !TSB) {
3030  TokError("invalid barrier option name");
3031  return MatchOperand_ParseFail;
3032  }
3033 
3034  Operands.push_back(AArch64Operand::CreateBarrier(
3035  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3036  Parser.Lex(); // Consume the option
3037 
3038  return MatchOperand_Success;
3039 }
3040 
3042 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3043  MCAsmParser &Parser = getParser();
3044  const AsmToken &Tok = Parser.getTok();
3045 
3046  if (Tok.isNot(AsmToken::Identifier))
3047  return MatchOperand_NoMatch;
3048 
3049  int MRSReg, MSRReg;
3050  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3051  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3052  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3053  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3054  } else
3055  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3056 
3057  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3058  unsigned PStateImm = -1;
3059  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3060  PStateImm = PState->Encoding;
3061 
3062  Operands.push_back(
3063  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3064  PStateImm, getContext()));
3065  Parser.Lex(); // Eat identifier
3066 
3067  return MatchOperand_Success;
3068 }
3069 
3070 /// tryParseNeonVectorRegister - Parse a vector register operand.
3071 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3072  MCAsmParser &Parser = getParser();
3073  if (Parser.getTok().isNot(AsmToken::Identifier))
3074  return true;
3075 
3076  SMLoc S = getLoc();
3077  // Check for a vector register specifier first.
3078  StringRef Kind;
3079  unsigned Reg;
3080  OperandMatchResultTy Res =
3081  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3082  if (Res != MatchOperand_Success)
3083  return true;
3084 
3085  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3086  if (!KindRes)
3087  return true;
3088 
3089  unsigned ElementWidth = KindRes->second;
3090  Operands.push_back(
3091  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3092  S, getLoc(), getContext()));
3093 
3094  // If there was an explicit qualifier, that goes on as a literal text
3095  // operand.
3096  if (!Kind.empty())
3097  Operands.push_back(
3098  AArch64Operand::CreateToken(Kind, false, S, getContext()));
3099 
3100  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3101 }
3102 
3104 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3105  SMLoc SIdx = getLoc();
3106  if (parseOptionalToken(AsmToken::LBrac)) {
3107  const MCExpr *ImmVal;
3108  if (getParser().parseExpression(ImmVal))
3109  return MatchOperand_NoMatch;
3110  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3111  if (!MCE) {
3112  TokError("immediate value expected for vector index");
3113  return MatchOperand_ParseFail;;
3114  }
3115 
3116  SMLoc E = getLoc();
3117 
3118  if (parseToken(AsmToken::RBrac, "']' expected"))
3119  return MatchOperand_ParseFail;;
3120 
3121  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3122  E, getContext()));
3123  return MatchOperand_Success;
3124  }
3125 
3126  return MatchOperand_NoMatch;
3127 }
3128 
3129 // tryParseVectorRegister - Try to parse a vector register name with
3130 // optional kind specifier. If it is a register specifier, eat the token
3131 // and return it.
3133 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3134  RegKind MatchKind) {
3135  MCAsmParser &Parser = getParser();
3136  const AsmToken &Tok = Parser.getTok();
3137 
3138  if (Tok.isNot(AsmToken::Identifier))
3139  return MatchOperand_NoMatch;
3140 
3141  StringRef Name = Tok.getString();
3142  // If there is a kind specifier, it's separated from the register name by
3143  // a '.'.
3144  size_t Start = 0, Next = Name.find('.');
3145  StringRef Head = Name.slice(Start, Next);
3146  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3147 
3148  if (RegNum) {
3149  if (Next != StringRef::npos) {
3150  Kind = Name.slice(Next, StringRef::npos);
3151  if (!isValidVectorKind(Kind, MatchKind)) {
3152  TokError("invalid vector kind qualifier");
3153  return MatchOperand_ParseFail;
3154  }
3155  }
3156  Parser.Lex(); // Eat the register token.
3157 
3158  Reg = RegNum;
3159  return MatchOperand_Success;
3160  }
3161 
3162  return MatchOperand_NoMatch;
3163 }
3164 
3165 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3167 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3168  // Check for a SVE predicate register specifier first.
3169  const SMLoc S = getLoc();
3170  StringRef Kind;
3171  unsigned RegNum;
3172  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3173  if (Res != MatchOperand_Success)
3174  return Res;
3175 
3176  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3177  if (!KindRes)
3178  return MatchOperand_NoMatch;
3179 
3180  unsigned ElementWidth = KindRes->second;
3181  Operands.push_back(AArch64Operand::CreateVectorReg(
3182  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3183  getLoc(), getContext()));
3184 
3185  // Not all predicates are followed by a '/m' or '/z'.
3186  MCAsmParser &Parser = getParser();
3187  if (Parser.getTok().isNot(AsmToken::Slash))
3188  return MatchOperand_Success;
3189 
3190  // But when they do they shouldn't have an element type suffix.
3191  if (!Kind.empty()) {
3192  Error(S, "not expecting size suffix");
3193  return MatchOperand_ParseFail;
3194  }
3195 
3196  // Add a literal slash as operand
3197  Operands.push_back(
3198  AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3199 
3200  Parser.Lex(); // Eat the slash.
3201 
3202  // Zeroing or merging?
3203  auto Pred = Parser.getTok().getString().lower();
3204  if (Pred != "z" && Pred != "m") {
3205  Error(getLoc(), "expecting 'm' or 'z' predication");
3206  return MatchOperand_ParseFail;
3207  }
3208 
3209  // Add zero/merge token.
3210  const char *ZM = Pred == "z" ? "z" : "m";
3211  Operands.push_back(
3212  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3213 
3214  Parser.Lex(); // Eat zero/merge token.
3215  return MatchOperand_Success;
3216 }
3217 
3218 /// parseRegister - Parse a register operand.
3219 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3220  // Try for a Neon vector register.
3221  if (!tryParseNeonVectorRegister(Operands))
3222  return false;
3223 
3224  // Otherwise try for a scalar register.
3225  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3226  return false;
3227 
3228  return true;
3229 }
3230 
3231 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3232  MCAsmParser &Parser = getParser();
3233  bool HasELFModifier = false;
3235 
3236  if (parseOptionalToken(AsmToken::Colon)) {
3237  HasELFModifier = true;
3238 
3239  if (Parser.getTok().isNot(AsmToken::Identifier))
3240  return TokError("expect relocation specifier in operand after ':'");
3241 
3242  std::string LowerCase = Parser.getTok().getIdentifier().lower();
3243  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3244  .Case("lo12", AArch64MCExpr::VK_LO12)
3245  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3246  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3247  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3248  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3249  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3250  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3251  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3252  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3253  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3254  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3255  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3256  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3257  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3258  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3259  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3260  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3261  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3262  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3263  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3264  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3265  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3266  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3267  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3268  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3269  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3270  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3271  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3273  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3275  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3276  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3277  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3279  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3280  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3282 
3283  if (RefKind == AArch64MCExpr::VK_INVALID)
3284  return TokError("expect relocation specifier in operand after ':'");
3285 
3286  Parser.Lex(); // Eat identifier
3287 
3288  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3289  return true;
3290  }
3291 
3292  if (getParser().parseExpression(ImmVal))
3293  return true;
3294 
3295  if (HasELFModifier)
3296  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3297 
3298  return false;
3299 }
3300 
3301 template <RegKind VectorKind>
3303 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3304  bool ExpectMatch) {
3305  MCAsmParser &Parser = getParser();
3306  if (!Parser.getTok().is(AsmToken::LCurly))
3307  return MatchOperand_NoMatch;
3308 
3309  // Wrapper around parse function
3310  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3311  bool NoMatchIsError) {
3312  auto RegTok = Parser.getTok();
3313  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3314  if (ParseRes == MatchOperand_Success) {
3315  if (parseVectorKind(Kind, VectorKind))
3316  return ParseRes;
3317  llvm_unreachable("Expected a valid vector kind");
3318  }
3319 
3320  if (RegTok.isNot(AsmToken::Identifier) ||
3321  ParseRes == MatchOperand_ParseFail ||
3322  (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3323  Error(Loc, "vector register expected");
3324  return MatchOperand_ParseFail;
3325  }
3326 
3327  return MatchOperand_NoMatch;
3328  };
3329 
3330  SMLoc S = getLoc();
3331  auto LCurly = Parser.getTok();
3332  Parser.Lex(); // Eat left bracket token.
3333 
3334  StringRef Kind;
3335  unsigned FirstReg;
3336  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3337 
3338  // Put back the original left bracket if there was no match, so that
3339  // different types of list-operands can be matched (e.g. SVE, Neon).
3340  if (ParseRes == MatchOperand_NoMatch)
3341  Parser.getLexer().UnLex(LCurly);
3342 
3343  if (ParseRes != MatchOperand_Success)
3344  return ParseRes;
3345 
3346  int64_t PrevReg = FirstReg;
3347  unsigned Count = 1;
3348 
3349  if (parseOptionalToken(AsmToken::Minus)) {
3350  SMLoc Loc = getLoc();
3351  StringRef NextKind;
3352 
3353  unsigned Reg;
3354  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3355  if (ParseRes != MatchOperand_Success)
3356  return ParseRes;
3357 
3358  // Any Kind suffices must match on all regs in the list.
3359  if (Kind != NextKind) {
3360  Error(Loc, "mismatched register size suffix");
3361  return MatchOperand_ParseFail;
3362  }
3363 
3364  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3365 
3366  if (Space == 0 || Space > 3) {
3367  Error(Loc, "invalid number of vectors");
3368  return MatchOperand_ParseFail;
3369  }
3370 
3371  Count += Space;
3372  }
3373  else {
3374  while (parseOptionalToken(AsmToken::Comma)) {
3375  SMLoc Loc = getLoc();
3376  StringRef NextKind;
3377  unsigned Reg;
3378  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3379  if (ParseRes != MatchOperand_Success)
3380  return ParseRes;
3381 
3382  // Any Kind suffices must match on all regs in the list.
3383  if (Kind != NextKind) {
3384  Error(Loc, "mismatched register size suffix");
3385  return MatchOperand_ParseFail;
3386  }
3387 
3388  // Registers must be incremental (with wraparound at 31)
3389  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3390  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3391  Error(Loc, "registers must be sequential");
3392  return MatchOperand_ParseFail;
3393  }
3394 
3395  PrevReg = Reg;
3396  ++Count;
3397  }
3398  }
3399 
3400  if (parseToken(AsmToken::RCurly, "'}' expected"))
3401  return MatchOperand_ParseFail;
3402 
3403  if (Count > 4) {
3404  Error(S, "invalid number of vectors");
3405  return MatchOperand_ParseFail;
3406  }
3407 
3408  unsigned NumElements = 0;
3409  unsigned ElementWidth = 0;
3410  if (!Kind.empty()) {
3411  if (const auto &VK = parseVectorKind(Kind, VectorKind))
3412  std::tie(NumElements, ElementWidth) = *VK;
3413  }
3414 
3415  Operands.push_back(AArch64Operand::CreateVectorList(
3416  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3417  getContext()));
3418 
3419  return MatchOperand_Success;
3420 }
3421 
3422 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3423 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3424  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3425  if (ParseRes != MatchOperand_Success)
3426  return true;
3427 
3428  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3429 }
3430 
3432 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3433  SMLoc StartLoc = getLoc();
3434 
3435  unsigned RegNum;
3436  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3437  if (Res != MatchOperand_Success)
3438  return Res;
3439 
3440  if (!parseOptionalToken(AsmToken::Comma)) {
3441  Operands.push_back(AArch64Operand::CreateReg(
3442  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3443  return MatchOperand_Success;
3444  }
3445 
3446  parseOptionalToken(AsmToken::Hash);
3447 
3448  if (getParser().getTok().isNot(AsmToken::Integer)) {
3449  Error(getLoc(), "index must be absent or #0");
3450  return MatchOperand_ParseFail;
3451  }
3452 
3453  const MCExpr *ImmVal;
3454  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3455  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3456  Error(getLoc(), "index must be absent or #0");
3457  return MatchOperand_ParseFail;
3458  }
3459 
3460  Operands.push_back(AArch64Operand::CreateReg(
3461  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3462  return MatchOperand_Success;
3463 }
3464 
3465 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3467 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3468  SMLoc StartLoc = getLoc();
3469 
3470  unsigned RegNum;
3471  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3472  if (Res != MatchOperand_Success)
3473  return Res;
3474 
3475  // No shift/extend is the default.
3476  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3477  Operands.push_back(AArch64Operand::CreateReg(
3478  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3479  return MatchOperand_Success;
3480  }
3481 
3482  // Eat the comma
3483  getParser().Lex();
3484 
3485  // Match the shift
3487  Res = tryParseOptionalShiftExtend(ExtOpnd);
3488  if (Res != MatchOperand_Success)
3489  return Res;
3490 
3491  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3492  Operands.push_back(AArch64Operand::CreateReg(
3493  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3494  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3495  Ext->hasShiftExtendAmount()));
3496 
3497  return MatchOperand_Success;
3498 }
3499 
3500 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3501  MCAsmParser &Parser = getParser();
3502 
3503  // Some SVE instructions have a decoration after the immediate, i.e.
3504  // "mul vl". We parse them here and add tokens, which must be present in the
3505  // asm string in the tablegen instruction.
3506  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3507  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3508  if (!Parser.getTok().getString().equals_lower("mul") ||
3509  !(NextIsVL || NextIsHash))
3510  return true;
3511 
3512  Operands.push_back(
3513  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3514  Parser.Lex(); // Eat the "mul"
3515 
3516  if (NextIsVL) {
3517  Operands.push_back(
3518  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3519  Parser.Lex(); // Eat the "vl"
3520  return false;
3521  }
3522 
3523  if (NextIsHash) {
3524  Parser.Lex(); // Eat the #
3525  SMLoc S = getLoc();
3526 
3527  // Parse immediate operand.
3528  const MCExpr *ImmVal;
3529  if (!Parser.parseExpression(ImmVal))
3530  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3531  Operands.push_back(AArch64Operand::CreateImm(
3532  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3533  getContext()));
3534  return MatchOperand_Success;
3535  }
3536  }
3537 
3538  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3539 }
3540 
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// \param Operands       receives the parsed operand(s).
/// \param isCondCode     when true, an identifier is parsed as a condition
///                       code rather than a register/expression.
/// \param invertCondCode forwarded to parseCondCode for aliases whose
///                       condition is inverted relative to the real opcode.
/// \returns true on a parse error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First give any tablegen-generated custom operand parser a chance.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly :modifier:-prefixed) immediate
    // expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    // '{' starts a vector-register list.
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE(review): this relies on the implicit OperandMatchResultTy->bool
    // conversion (Success -> false, ParseFail -> true).
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two separate raw tokens, as the matcher
      // expects.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =val" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Strip trailing 16-bit zero chunks, tracking the LSL amount needed to
      // reconstruct the value.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite "ldr rN, =imm" into "movz rN, #imm16 [, lsl #sh]".
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3712 
3713 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3714  const MCParsedAsmOperand &Op2) const {
3715  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3716  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3717  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3718  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3719  return MCTargetAsmParser::regsEqual(Op1, Op2);
3720 
3721  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3722  "Testing equality of non-scalar registers not supported");
3723 
3724  // Check if a registers match their sub/super register classes.
3725  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3726  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3727  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3728  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3729  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3730  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3731  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3732  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3733 
3734  return false;
3735 }
3736 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// On success the mnemonic (split into separate tokens at '.' boundaries) and
/// every parsed operand are appended to \p Operands and false is returned; on
/// a parse failure a diagnostic is emitted and true is returned.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Canonicalize the dot-less conditional-branch spellings ("beq", "bne", ...)
  // to the architectural "b.<cond>" form so they share the normal branch
  // parsing path below.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic: the text after the first
  // '.' is parsed as a condition code and becomes a CondCode operand rather
  // than a plain token.
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the diagnostic location at the suffix inside the original
    // mnemonic text.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic. Each token keeps its leading
  // '.' (the slice starts at Next, not Next + 1).
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based operand position, used for the cond-code flags.
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3872 
3873 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3874  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3875  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3876  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3877  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3878  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3879  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3880  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3881 }
3882 
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Returns true (after emitting a diagnostic) when \p Inst is unpredictable or
// otherwise invalid in a way the generated matcher cannot express; returns
// false when all hand-written checks pass. \p Loc carries the source
// locations of the instruction's operands for diagnostics.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    // NOTE(review): the guarding 'if' condition for this diagnostic is not
    // visible in this excerpt -- confirm against the full source before
    // relying on this path.
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                             " movprfx and destination also used as non-destructive"
                             " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instruction's general predicate (first PPR operand).
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      // NOTE(review): the second half of this '||' condition is not visible
      // in this excerpt -- confirm against the full source.
      if (PgIdx == -1 ||
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Pre/post-indexed LDP: writeback base (Rn) must not overlap either
    // destination register.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // NOTE(review): a 'break;' appears to be elided from this excerpt here
    // (as written, control falls through to the next case) -- confirm
    // against the full source.
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback LDP: the two destinations must differ.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // Writeback LDP forms whose destinations are FP/SIMD (or LDPSWpost):
    // only the Rt == Rt2 check applies here.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Pre/post-indexed STP: writeback base must not overlap either source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Pre/post-indexed single-register LDR: base must not overlap the
    // destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Pre/post-indexed single-register STR: base must not overlap the source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: the status register must not overlap the data or
    // (non-SP) base register.
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    // Exclusive store-pair: same status-overlap rule for both data registers.
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDGV: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rn = Inst.getOperand(1).getReg();
    if (RI->isSubRegisterEq(Rt, Rn)) {
      return Error(Loc[0],
                   "unpredictable LDGV instruction, writeback register is also "
                   "the target register");
    }
  }
  }


  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
4169 
/// Forward declaration: returns a ", did you mean ..." style suggestion for an
/// unrecognized mnemonic \p S given the available feature bits \p FBS; used by
/// showMatchError for Match_MnemonicFail. The definition appears later in this
/// file (presumably emitted by TableGen's matcher tables -- confirm).
static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
                                             unsigned VariantID = 0);
4172 
4173 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4174  uint64_t ErrorInfo,
4175  OperandVector &Operands) {
4176  switch (ErrCode) {
4177  case Match_InvalidTiedOperand: {
4179  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4180  .getRegEqualityTy();
4181  switch (EqTy) {
4182  case RegConstraintEqualityTy::EqualsSubReg:
4183  return Error(Loc, "operand must be 64-bit form of destination register");
4184  case RegConstraintEqualityTy::EqualsSuperReg:
4185  return Error(Loc, "operand must be 32-bit form of destination register");
4186  case RegConstraintEqualityTy::EqualsReg:
4187  return Error(Loc, "operand must match destination register");
4188  }
4189  llvm_unreachable("Unknown RegConstraintEqualityTy");
4190  }
4191  case Match_MissingFeature:
4192  return Error(Loc,
4193  "instruction requires a CPU feature not currently enabled");
4194  case Match_InvalidOperand:
4195  return Error(Loc, "invalid operand for instruction");
4196  case Match_InvalidSuffix:
4197  return Error(Loc, "invalid type suffix for instruction");
4198  case Match_InvalidCondCode:
4199  return Error(Loc, "expected AArch64 condition code");
4200  case Match_AddSubRegExtendSmall:
4201  return Error(Loc,
4202  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4203  case Match_AddSubRegExtendLarge:
4204  return Error(Loc,
4205  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4206  case Match_AddSubSecondSource:
4207  return Error(Loc,
4208  "expected compatible register, symbol or integer in range [0, 4095]");
4209  case Match_LogicalSecondSource:
4210  return Error(Loc, "expected compatible register or logical immediate");
4211  case Match_InvalidMovImm32Shift:
4212  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4213  case Match_InvalidMovImm64Shift:
4214  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4215  case Match_AddSubRegShift32:
4216  return Error(Loc,
4217  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4218  case Match_AddSubRegShift64:
4219  return Error(Loc,
4220  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4221  case Match_InvalidFPImm:
4222  return Error(Loc,
4223  "expected compatible register or floating-point constant");
4224  case Match_InvalidMemoryIndexedSImm6:
4225  return Error(Loc, "index must be an integer in range [-32, 31].");
4226  case Match_InvalidMemoryIndexedSImm5:
4227  return Error(Loc, "index must be an integer in range [-16, 15].");
4228  case Match_InvalidMemoryIndexed1SImm4:
4229  return Error(Loc, "index must be an integer in range [-8, 7].");
4230  case Match_InvalidMemoryIndexed2SImm4:
4231  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4232  case Match_InvalidMemoryIndexed3SImm4:
4233  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4234  case Match_InvalidMemoryIndexed4SImm4:
4235  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4236  case Match_InvalidMemoryIndexed16SImm4:
4237  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4238  case Match_InvalidMemoryIndexed1SImm6:
4239  return Error(Loc, "index must be an integer in range [-32, 31].");
4240  case Match_InvalidMemoryIndexedSImm8:
4241  return Error(Loc, "index must be an integer in range [-128, 127].");
4242  case Match_InvalidMemoryIndexedSImm9:
4243  return Error(Loc, "index must be an integer in range [-256, 255].");
4244  case Match_InvalidMemoryIndexed16SImm9:
4245  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4246  case Match_InvalidMemoryIndexed8SImm10:
4247  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4248  case Match_InvalidMemoryIndexed4SImm7:
4249  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4250  case Match_InvalidMemoryIndexed8SImm7:
4251  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4252  case Match_InvalidMemoryIndexed16SImm7:
4253  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4254  case Match_InvalidMemoryIndexed8UImm5:
4255  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4256  case Match_InvalidMemoryIndexed4UImm5:
4257  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4258  case Match_InvalidMemoryIndexed2UImm5:
4259  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4260  case Match_InvalidMemoryIndexed8UImm6:
4261  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4262  case Match_InvalidMemoryIndexed16UImm6:
4263  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4264  case Match_InvalidMemoryIndexed4UImm6:
4265  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4266  case Match_InvalidMemoryIndexed2UImm6:
4267  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4268  case Match_InvalidMemoryIndexed1UImm6:
4269  return Error(Loc, "index must be in range [0, 63].");
4270  case Match_InvalidMemoryWExtend8:
4271  return Error(Loc,
4272  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4273  case Match_InvalidMemoryWExtend16:
4274  return Error(Loc,
4275  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4276  case Match_InvalidMemoryWExtend32:
4277  return Error(Loc,
4278  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4279  case Match_InvalidMemoryWExtend64:
4280  return Error(Loc,
4281  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4282  case Match_InvalidMemoryWExtend128:
4283  return Error(Loc,
4284  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4285  case Match_InvalidMemoryXExtend8:
4286  return Error(Loc,
4287  "expected 'lsl' or 'sxtx' with optional shift of #0");
4288  case Match_InvalidMemoryXExtend16:
4289  return Error(Loc,
4290  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4291  case Match_InvalidMemoryXExtend32:
4292  return Error(Loc,
4293  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4294  case Match_InvalidMemoryXExtend64:
4295  return Error(Loc,
4296  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4297  case Match_InvalidMemoryXExtend128:
4298  return Error(Loc,
4299  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4300  case Match_InvalidMemoryIndexed1:
4301  return Error(Loc, "index must be an integer in range [0, 4095].");
4302  case Match_InvalidMemoryIndexed2:
4303  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4304  case Match_InvalidMemoryIndexed4:
4305  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4306  case Match_InvalidMemoryIndexed8:
4307  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4308  case Match_InvalidMemoryIndexed16:
4309  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4310  case Match_InvalidImm0_1:
4311  return Error(Loc, "immediate must be an integer in range [0, 1].");
4312  case Match_InvalidImm0_7:
4313  return Error(Loc, "immediate must be an integer in range [0, 7].");
4314  case Match_InvalidImm0_15:
4315  return Error(Loc, "immediate must be an integer in range [0, 15].");
4316  case Match_InvalidImm0_31:
4317  return Error(Loc, "immediate must be an integer in range [0, 31].");
4318  case Match_InvalidImm0_63:
4319  return Error(Loc, "immediate must be an integer in range [0, 63].");
4320  case Match_InvalidImm0_127:
4321  return Error(Loc, "immediate must be an integer in range [0, 127].");
4322  case Match_InvalidImm0_255:
4323  return Error(Loc, "immediate must be an integer in range [0, 255].");
4324  case Match_InvalidImm0_65535:
4325  return Error(Loc, "immediate must be an integer in range [0, 65535].");
4326  case Match_InvalidImm1_8:
4327  return Error(Loc, "immediate must be an integer in range [1, 8].");
4328  case Match_InvalidImm1_16:
4329  return Error(Loc, "immediate must be an integer in range [1, 16].");
4330  case Match_InvalidImm1_32:
4331  return Error(Loc, "immediate must be an integer in range [1, 32].");
4332  case Match_InvalidImm1_64:
4333  return Error(Loc, "immediate must be an integer in range [1, 64].");
4334  case Match_InvalidSVEAddSubImm8:
4335  return Error(Loc, "immediate must be an integer in range [0, 255]"
4336  " with a shift amount of 0");
4337  case Match_InvalidSVEAddSubImm16:
4338  case Match_InvalidSVEAddSubImm32:
4339  case Match_InvalidSVEAddSubImm64:
4340  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4341  "multiple of 256 in range [256, 65280]");
4342  case Match_InvalidSVECpyImm8:
4343  return Error(Loc, "immediate must be an integer in range [-128, 255]"
4344  " with a shift amount of 0");
4345  case Match_InvalidSVECpyImm16:
4346  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4347  "multiple of 256 in range [-32768, 65280]");
4348  case Match_InvalidSVECpyImm32:
4349  case Match_InvalidSVECpyImm64:
4350  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4351  "multiple of 256 in range [-32768, 32512]");
4352  case Match_InvalidIndexRange1_1:
4353  return Error(Loc, "expected lane specifier '[1]'");
4354  case Match_InvalidIndexRange0_15:
4355  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4356  case Match_InvalidIndexRange0_7:
4357  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4358  case Match_InvalidIndexRange0_3:
4359  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4360  case Match_InvalidIndexRange0_1:
4361  return Error(Loc, "vector lane must be an integer in range [0, 1].");
4362  case Match_InvalidSVEIndexRange0_63:
4363  return Error(Loc, "vector lane must be an integer in range [0, 63].");
4364  case Match_InvalidSVEIndexRange0_31:
4365  return Error(Loc, "vector lane must be an integer in range [0, 31].");
4366  case Match_InvalidSVEIndexRange0_15:
4367  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4368  case Match_InvalidSVEIndexRange0_7:
4369  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4370  case Match_InvalidSVEIndexRange0_3:
4371  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4372  case Match_InvalidLabel:
4373  return Error(Loc, "expected label or encodable integer pc offset");
4374  case Match_MRS:
4375  return Error(Loc, "expected readable system register");
4376  case Match_MSR:
4377  return Error(Loc, "expected writable system register or pstate");
4378  case Match_InvalidComplexRotationEven:
4379  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4380  case Match_InvalidComplexRotationOdd:
4381  return Error(Loc, "complex rotation must be 90 or 270.");
4382  case Match_MnemonicFail: {
4383  std::string Suggestion = AArch64MnemonicSpellCheck(
4384  ((AArch64Operand &)*Operands[0]).getToken(),
4385  ComputeAvailableFeatures(STI->getFeatureBits()));
4386  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4387  }
4388  case Match_InvalidGPR64shifted8:
4389  return Error(Loc, "register must be x0..x30 or xzr, without shift");
4390  case Match_InvalidGPR64shifted16:
4391  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4392  case Match_InvalidGPR64shifted32:
4393  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4394  case Match_InvalidGPR64shifted64:
4395  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4396  case Match_InvalidGPR64NoXZRshifted8:
4397  return Error(Loc, "register must be x0..x30 without shift");
4398  case Match_InvalidGPR64NoXZRshifted16:
4399  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4400  case Match_InvalidGPR64NoXZRshifted32:
4401  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4402  case Match_InvalidGPR64NoXZRshifted64:
4403  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4404  case Match_InvalidZPR32UXTW8:
4405  case Match_InvalidZPR32SXTW8:
4406  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4407  case Match_InvalidZPR32UXTW16:
4408  case Match_InvalidZPR32SXTW16:
4409  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4410  case Match_InvalidZPR32UXTW32:
4411  case Match_InvalidZPR32SXTW32:
4412  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4413  case Match_InvalidZPR32UXTW64:
4414  case Match_InvalidZPR32SXTW64:
4415  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4416  case Match_InvalidZPR64UXTW8:
4417  case Match_InvalidZPR64SXTW8:
4418  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4419  case Match_InvalidZPR64UXTW16:
4420  case Match_InvalidZPR64SXTW16:
4421  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4422  case Match_InvalidZPR64UXTW32:
4423  case Match_InvalidZPR64SXTW32:
4424  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4425  case Match_InvalidZPR64UXTW64:
4426  case Match_InvalidZPR64SXTW64:
4427  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4428  case Match_InvalidZPR32LSL8:
4429  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4430  case Match_InvalidZPR32LSL16:
4431  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4432  case Match_InvalidZPR32LSL32:
4433  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4434  case Match_InvalidZPR32LSL64:
4435  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4436  case Match_InvalidZPR64LSL8:
4437  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4438  case Match_InvalidZPR64LSL16:
4439  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4440  case Match_InvalidZPR64LSL32:
4441  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4442  case Match_InvalidZPR64LSL64:
4443  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4444  case Match_InvalidZPR0:
4445  return Error(Loc, "expected register without element width sufix");
4446  case Match_InvalidZPR8:
4447  case Match_InvalidZPR16:
4448  case Match_InvalidZPR32:
4449  case Match_InvalidZPR64:
4450  case Match_InvalidZPR128:
4451  return Error(Loc, "invalid element width");
4452  case Match_InvalidZPR_3b8:
4453  return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4454  case Match_InvalidZPR_3b16:
4455  return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4456  case Match_InvalidZPR_3b32:
4457  return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4458  case Match_InvalidZPR_4b16:
4459  return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4460  case Match_InvalidZPR_4b32:
4461  return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4462  case Match_InvalidZPR_4b64:
4463  return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4464  case Match_InvalidSVEPattern:
4465  return Error(Loc, "invalid predicate pattern");
4466  case Match_InvalidSVEPredicateAnyReg:
4467  case Match_InvalidSVEPredicateBReg:
4468  case Match_InvalidSVEPredicateHReg:
4469  case Match_InvalidSVEPredicateSReg:
4470  case Match_InvalidSVEPredicateDReg:
4471  return Error(Loc, "invalid predicate register.");
4472  case Match_InvalidSVEPredicate3bAnyReg:
4473  case Match_InvalidSVEPredicate3bBReg:
4474  case Match_InvalidSVEPredicate3bHReg:
4475  case Match_InvalidSVEPredicate3bSReg:
4476  case Match_InvalidSVEPredicate3bDReg:
4477  return Error(Loc, "restricted predicate has range [0, 7].");
4478  case Match_InvalidSVEExactFPImmOperandHalfOne:
4479  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4480  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4481  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4482  case Match_InvalidSVEExactFPImmOperandZeroOne:
4483  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4484  default:
4485  llvm_unreachable("unexpected error code!");
4486  }
4487 }
4488 
/// Forward declaration: maps a subtarget feature bit value to its
/// human-readable name for diagnostics. The definition appears later in this
/// file (presumably emitted by TableGen -- confirm).
static const char *getSubtargetFeatureName(uint64_t Val);
4490 
4491 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4492  OperandVector &Operands,
4493  MCStreamer &Out,
4494  uint64_t &ErrorInfo,
4495  bool MatchingInlineAsm) {
4496  assert(!Operands.empty() && "Unexpect empty operand list!");
4497  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4498  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4499 
4500  StringRef Tok = Op.getToken();
4501  unsigned NumOperands = Operands.size();
4502 
4503  if (NumOperands == 4 && Tok == "lsl") {
4504  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4505  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4506  if (Op2.isScalarReg() && Op3.isImm()) {
4507  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4508  if (Op3CE) {
4509  uint64_t Op3Val = Op3CE->getValue();
4510  uint64_t NewOp3Val = 0;
4511  uint64_t NewOp4Val = 0;
4512  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4513  Op2.getReg())) {
4514  NewOp3Val = (32 - Op3Val) & 0x1f;
4515  NewOp4Val = 31 - Op3Val;
4516  } else {
4517  NewOp3Val = (64 - Op3Val) & 0x3f;
4518  NewOp4Val = 63 - Op3Val;
4519  }
4520 
4521  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4522  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4523 
4524  Operands[0] = AArch64Operand::CreateToken(
4525  "ubfm", false, Op.getStartLoc(), getContext());
4526  Operands.push_back(AArch64Operand::CreateImm(
4527  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4528  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4529  Op3.getEndLoc(), getContext());
4530  }
4531  }
4532  } else if (NumOperands == 4 && Tok == "bfc") {
4533  // FIXME: Horrible hack to handle BFC->BFM alias.
4534  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4535  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4536  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4537 
4538  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4539  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4540  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4541 
4542  if (LSBCE && WidthCE) {
4543  uint64_t LSB = LSBCE->getValue();
4544  uint64_t Width = WidthCE->getValue();
4545 
4546  uint64_t RegWidth = 0;
4547  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4548  Op1.getReg()))
4549  RegWidth = 64;
4550  else
4551  RegWidth = 32;
4552 
4553  if (LSB >= RegWidth)
4554  return Error(LSBOp.getStartLoc(),
4555  "expected integer in range [0, 31]");
4556  if (Width < 1 || Width > RegWidth)
4557  return Error(WidthOp.getStartLoc(),
4558  "expected integer in range [1, 32]");
4559 
4560  uint64_t ImmR = 0;
4561  if (RegWidth == 32)
4562  ImmR = (32 - LSB) & 0x1f;
4563  else
4564  ImmR = (64 - LSB) & 0x3f;
4565 
4566  uint64_t ImmS = Width - 1;
4567 
4568  if (ImmR != 0 && ImmS >= ImmR)
4569  return Error(WidthOp.getStartLoc(),
4570  "requested insert overflows register");
4571 
4572  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4573  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4574  Operands[0] = AArch64Operand::CreateToken(
4575  "bfm", false, Op.getStartLoc(), getContext());
4576  Operands[2] = AArch64Operand::CreateReg(
4577  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4578  SMLoc(), SMLoc(), getContext());
4579  Operands[3] = AArch64Operand::CreateImm(
4580  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4581  Operands.emplace_back(
4582  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4583  WidthOp.getEndLoc(), getContext()));
4584  }
4585  }
4586  } else if (NumOperands == 5) {
4587  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4588  // UBFIZ -> UBFM aliases.
4589  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4590  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4591  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4592  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4593 
4594  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4595  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4596  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4597 
4598  if (Op3CE && Op4CE) {
4599  uint64_t Op3Val = Op3CE->getValue();
4600  uint64_t Op4Val = Op4CE->getValue();
4601 
4602  uint64_t RegWidth = 0;
4603  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4604  Op1.getReg()))
4605  RegWidth = 64;
4606  else
4607  RegWidth = 32;
4608 
4609  if (Op3Val >= RegWidth)
4610  return Error(Op3.getStartLoc(),
4611  "expected integer in range [0, 31]");
4612  if (Op4Val < 1 || Op4Val > RegWidth)
4613  return Error(Op4.getStartLoc(),
4614  "expected integer in range [1, 32]");
4615 
4616  uint64_t NewOp3Val = 0;
4617  if (RegWidth == 32)
4618  NewOp3Val = (32 - Op3Val) & 0x1f;
4619  else
4620  NewOp3Val = (64 - Op3Val) & 0x3f;
4621 
4622  uint64_t NewOp4Val = Op4Val - 1;
4623 
4624  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4625  return Error(Op4.getStartLoc(),
4626  "requested insert overflows register");
4627 
4628  const MCExpr *NewOp3 =
4629  MCConstantExpr::create(NewOp3Val, getContext());
4630  const MCExpr *NewOp4 =
4631  MCConstantExpr::create(NewOp4Val, getContext());
4632  Operands[3] = AArch64Operand::CreateImm(
4633  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4634  Operands[4] = AArch64Operand::CreateImm(
4635  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4636  if (Tok == "bfi")
4637  Operands[0] = AArch64Operand::CreateToken(
4638  "bfm", false, Op.getStartLoc(), getContext());
4639  else if (Tok == "sbfiz")
4640  Operands[0] = AArch64Operand::CreateToken(
4641  "sbfm", false, Op.getStartLoc(), getContext());
4642  else if (Tok == "ubfiz")
4643  Operands[0] = AArch64Operand::CreateToken(
4644  "ubfm", false, Op.getStartLoc(), getContext());
4645  else
4646  llvm_unreachable("No valid mnemonic for alias?");
4647  }
4648  }
4649 
4650  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4651  // UBFX -> UBFM aliases.
4652  } else if (NumOperands == 5 &&
4653  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4654  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4655  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4656  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4657 
4658  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4659  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4660  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4661 
4662  if (Op3CE && Op4CE) {
4663  uint64_t Op3Val = Op3CE->getValue();
4664  uint64_t Op4Val = Op4CE->getValue();
4665 
4666  uint64_t RegWidth = 0;
4667  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4668  Op1.getReg()))
4669  RegWidth = 64;
4670  else
4671  RegWidth = 32;
4672 
4673  if (Op3Val >= RegWidth)
4674  return Error(Op3.getStartLoc(),
4675  "expected integer in range [0, 31]");
4676  if (Op4Val < 1 || Op4Val > RegWidth)
4677  return Error(Op4.getStartLoc(),
4678  "expected integer in range [1, 32]");
4679 
4680  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4681 
4682  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4683  return Error(Op4.getStartLoc(),
4684  "requested extract overflows register");
4685 
4686  const MCExpr *NewOp4 =
4687  MCConstantExpr::create(NewOp4Val, getContext());
4688  Operands[4] = AArch64Operand::CreateImm(
4689  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4690  if (Tok == "bfxil")
4691  Operands[0] = AArch64Operand::CreateToken(
4692  "bfm", false, Op.getStartLoc(), getContext());
4693  else if (Tok == "sbfx")
4694  Operands[0] = AArch64Operand::CreateToken(
4695  "sbfm", false, Op.getStartLoc(), getContext());
4696  else if (Tok == "ubfx")
4697  Operands[0] = AArch64Operand::CreateToken(
4698  "ubfm", false, Op.getStartLoc(), getContext());
4699  else
4700  llvm_unreachable("No valid mnemonic for alias?");
4701  }
4702  }
4703  }
4704  }
4705 
4706  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4707  // instruction for FP registers correctly in some rare circumstances. Convert
4708  // it to a safe instruction and warn (because silently changing someone's
4709  // assembly is rude).
4710  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4711  NumOperands == 4 && Tok == "movi") {
4712  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4713  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4714  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4715  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4716  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4717  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4718  if (Suffix.lower() == ".2d" &&
4719  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4720  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4721  " correctly on this CPU, converting to equivalent movi.16b");
4722  // Switch the suffix to .16b.
4723  unsigned Idx = Op1.isToken() ? 1 : 2;
4724  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4725  getContext());
4726  }
4727  }
4728  }
4729 
4730  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4731  // InstAlias can't quite handle this since the reg classes aren't
4732  // subclasses.
4733  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4734  // The source register can be Wn here, but the matcher expects a
4735  // GPR64. Twiddle it here if necessary.
4736  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4737  if (Op.isScalarReg()) {
4738  unsigned Reg = getXRegFromWReg(Op.getReg());
4739  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4740  Op.getStartLoc(), Op.getEndLoc(),
4741  getContext());
4742  }
4743  }
4744  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4745  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4746  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4747  if (Op.isScalarReg() &&
4748  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4749  Op.getReg())) {
4750  // The source register can be Wn here, but the matcher expects a
4751  // GPR64. Twiddle it here if necessary.
4752  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4753  if (Op.isScalarReg()) {
4754  unsigned Reg = getXRegFromWReg(Op.getReg());
4755  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4756  Op.getStartLoc(),
4757  Op.getEndLoc(), getContext());
4758  }
4759  }
4760  }
4761  // FIXME: Likewise for uxt[bh] with a Xd dst operand
4762  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4763  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4764  if (Op.isScalarReg() &&
4765  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4766  Op.getReg())) {
4767  // The source register can be Wn here, but the matcher expects a
4768  // GPR32. Twiddle it here if necessary.
4769  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4770  if (Op.isScalarReg()) {
4771  unsigned Reg = getWRegFromXReg(Op.getReg());
4772  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4773  Op.getStartLoc(),
4774  Op.getEndLoc(), getContext());
4775  }
4776  }
4777  }
4778 
4779  MCInst Inst;
4780  // First try to match against the secondary set of tables containing the
4781  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4782  unsigned MatchResult =
4783  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4784 
4785  // If that fails, try against the alternate table containing long-form NEON:
4786  // "fadd v0.2s, v1.2s, v2.2s"
4787  if (MatchResult != Match_Success) {
4788  // But first, save the short-form match result: we can use it in case the
4789  // long-form match also fails.
4790  auto ShortFormNEONErrorInfo = ErrorInfo;
4791  auto ShortFormNEONMatchResult = MatchResult;
4792 
4793  MatchResult =
4794  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4795 
4796  // Now, both matches failed, and the long-form match failed on the mnemonic
4797  // suffix token operand. The short-form match failure is probably more
4798  // relevant: use it instead.
4799  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4800  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4801  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4802  MatchResult = ShortFormNEONMatchResult;
4803  ErrorInfo = ShortFormNEONErrorInfo;
4804  }
4805  }
4806 
4807  switch (MatchResult) {
4808  case Match_Success: {
4809  // Perform range checking and other semantic validations
4810  SmallVector<SMLoc, 8> OperandLocs;
4811  NumOperands = Operands.size();
4812  for (unsigned i = 1; i < NumOperands; ++i)
4813  OperandLocs.push_back(Operands[i]->getStartLoc());
4814  if (validateInstruction(Inst, IDLoc, OperandLocs))
4815  return true;
4816 
4817  Inst.setLoc(IDLoc);
4818  Out.EmitInstruction(Inst, getSTI());
4819  return false;
4820  }
4821  case Match_MissingFeature: {
4822  assert(ErrorInfo && "Unknown missing feature!");
4823  // Special case the error message for the very common case where only
4824  // a single subtarget feature is missing (neon, e.g.).
4825  std::string Msg = "instruction requires:";
4826  uint64_t Mask = 1;
4827  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4828  if (ErrorInfo & Mask) {
4829  Msg += " ";
4830  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4831  }
4832  Mask <<= 1;
4833  }
4834  return Error(IDLoc, Msg);
4835  }
4836  case Match_MnemonicFail:
4837  return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4838  case Match_InvalidOperand: {
4839  SMLoc ErrorLoc = IDLoc;
4840 
4841  if (ErrorInfo != ~0ULL) {
4842  if (ErrorInfo >= Operands.size())
4843  return Error(IDLoc, "too few operands for instruction",
4844  SMRange(IDLoc, getTok().getLoc()));
4845 
4846  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4847  if (ErrorLoc == SMLoc())
4848  ErrorLoc = IDLoc;
4849  }
4850  // If the match failed on a suffix token operand, tweak the diagnostic
4851  // accordingly.
4852  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4853  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4854  MatchResult = Match_InvalidSuffix;
4855 
4856  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4857  }
4858  case Match_InvalidTiedOperand:
4859  case Match_InvalidMemoryIndexed1:
4860  case Match_InvalidMemoryIndexed2:
4861  case Match_InvalidMemoryIndexed4:
4862  case Match_InvalidMemoryIndexed8:
4863  case Match_InvalidMemoryIndexed16:
4864  case Match_InvalidCondCode:
4865  case Match_AddSubRegExtendSmall:
4866  case Match_AddSubRegExtendLarge:
4867  case Match_AddSubSecondSource:
4868  case Match_LogicalSecondSource:
4869  case Match_AddSubRegShift32:
4870  case Match_AddSubRegShift64:
4871  case Match_InvalidMovImm32Shift:
4872  case Match_InvalidMovImm64Shift:
4873  case Match_InvalidFPImm:
4874  case Match_InvalidMemoryWExtend8:
4875  case Match_InvalidMemoryWExtend16:
4876  case Match_InvalidMemoryWExtend32:
4877  case Match_InvalidMemoryWExtend64:
4878  case Match_InvalidMemoryWExtend128:
4879  case Match_InvalidMemoryXExtend8:
4880  case Match_InvalidMemoryXExtend16:
4881  case Match_InvalidMemoryXExtend32:
4882  case Match_InvalidMemoryXExtend64:
4883  case Match_InvalidMemoryXExtend128:
4884  case Match_InvalidMemoryIndexed1SImm4:
4885  case Match_InvalidMemoryIndexed2SImm4:
4886  case Match_InvalidMemoryIndexed3SImm4:
4887  case Match_InvalidMemoryIndexed4SImm4:
4888  case Match_InvalidMemoryIndexed1SImm6:
4889  case Match_InvalidMemoryIndexed16SImm4:
4890  case Match_InvalidMemoryIndexed4SImm7:
4891  case Match_InvalidMemoryIndexed8SImm7:
4892  case Match_InvalidMemoryIndexed16SImm7:
4893  case Match_InvalidMemoryIndexed8UImm5:
4894  case Match_InvalidMemoryIndexed4UImm5:
4895  case Match_InvalidMemoryIndexed2UImm5:
4896  case Match_InvalidMemoryIndexed1UImm6:
4897  case Match_InvalidMemoryIndexed2UImm6:
4898  case Match_InvalidMemoryIndexed4UImm6:
4899  case Match_InvalidMemoryIndexed8UImm6:
4900  case Match_InvalidMemoryIndexed16UImm6:
4901  case Match_InvalidMemoryIndexedSImm6:
4902  case Match_InvalidMemoryIndexedSImm5:
4903  case Match_InvalidMemoryIndexedSImm8:
4904  case Match_InvalidMemoryIndexedSImm9:
4905  case Match_InvalidMemoryIndexed16SImm9:
4906  case Match_InvalidMemoryIndexed8SImm10:
4907  case Match_InvalidImm0_1:
4908  case Match_InvalidImm0_7:
4909  case Match_InvalidImm0_15:
4910  case Match_InvalidImm0_31:
4911  case Match_InvalidImm0_63:
4912  case Match_InvalidImm0_127:
4913  case Match_InvalidImm0_255:
4914  case Match_InvalidImm0_65535:
4915  case Match_InvalidImm1_8:
4916  case Match_InvalidImm1_16:
4917  case Match_InvalidImm1_32:
4918  case Match_InvalidImm1_64:
4919  case Match_InvalidSVEAddSubImm8:
4920  case Match_InvalidSVEAddSubImm16:
4921  case Match_InvalidSVEAddSubImm32:
4922  case Match_InvalidSVEAddSubImm64:
4923  case Match_InvalidSVECpyImm8:
4924  case Match_InvalidSVECpyImm16:
4925  case Match_InvalidSVECpyImm32:
4926  case Match_InvalidSVECpyImm64:
4927  case Match_InvalidIndexRange1_1:
4928  case Match_InvalidIndexRange0_15:
4929  case Match_InvalidIndexRange0_7:
4930  case Match_InvalidIndexRange0_3:
4931  case Match_InvalidIndexRange0_1:
4932  case Match_InvalidSVEIndexRange0_63:
4933  case Match_InvalidSVEIndexRange0_31:
4934  case Match_InvalidSVEIndexRange0_15:
4935  case Match_InvalidSVEIndexRange0_7:
4936  case Match_InvalidSVEIndexRange0_3:
4937  case Match_InvalidLabel:
4938  case Match_InvalidComplexRotationEven:
4939  case Match_InvalidComplexRotationOdd:
4940  case Match_InvalidGPR64shifted8:
4941  case Match_InvalidGPR64shifted16:
4942  case Match_InvalidGPR64shifted32:
4943  case Match_InvalidGPR64shifted64:
4944  case Match_InvalidGPR64NoXZRshifted8:
4945  case Match_InvalidGPR64NoXZRshifted16:
4946  case Match_InvalidGPR64NoXZRshifted32:
4947  case Match_InvalidGPR64NoXZRshifted64:
4948  case Match_InvalidZPR32UXTW8:
4949  case Match_InvalidZPR32UXTW16:
4950  case Match_InvalidZPR32UXTW32:
4951  case Match_InvalidZPR32UXTW64:
4952  case Match_InvalidZPR32SXTW8:
4953  case Match_InvalidZPR32SXTW16:
4954  case Match_InvalidZPR32SXTW32:
4955  case Match_InvalidZPR32SXTW64:
4956  case Match_InvalidZPR64UXTW8:
4957  case Match_InvalidZPR64SXTW8:
4958  case Match_InvalidZPR64UXTW16:
4959  case Match_InvalidZPR64SXTW16:
4960  case Match_InvalidZPR64UXTW32:
4961  case Match_InvalidZPR64SXTW32:
4962  case Match_InvalidZPR64UXTW64:
4963  case Match_InvalidZPR64SXTW64:
4964  case Match_InvalidZPR32LSL8:
4965  case Match_InvalidZPR32LSL16:
4966  case Match_InvalidZPR32LSL32:
4967  case Match_InvalidZPR32LSL64:
4968  case Match_InvalidZPR64LSL8:
4969  case Match_InvalidZPR64LSL16:
4970  case Match_InvalidZPR64LSL32:
4971  case Match_InvalidZPR64LSL64:
4972  case Match_InvalidZPR0:
4973  case Match_InvalidZPR8:
4974  case Match_InvalidZPR16:
4975  case Match_InvalidZPR32:
4976  case Match_InvalidZPR64:
4977  case Match_InvalidZPR128:
4978  case Match_InvalidZPR_3b8:
4979  case Match_InvalidZPR_3b16:
4980  case Match_InvalidZPR_3b32:
4981  case Match_InvalidZPR_4b16:
4982  case Match_InvalidZPR_4b32:
4983  case Match_InvalidZPR_4b64:
4984  case Match_InvalidSVEPredicateAnyReg:
4985  case Match_InvalidSVEPattern:
4986  case Match_InvalidSVEPredicateBReg:
4987  case Match_InvalidSVEPredicateHReg:
4988  case Match_InvalidSVEPredicateSReg:
4989  case Match_InvalidSVEPredicateDReg:
4990  case Match_InvalidSVEPredicate3bAnyReg:
4991  case Match_InvalidSVEPredicate3bBReg:
4992  case Match_InvalidSVEPredicate3bHReg:
4993  case Match_InvalidSVEPredicate3bSReg:
4994  case Match_InvalidSVEPredicate3bDReg:
4995  case Match_InvalidSVEExactFPImmOperandHalfOne:
4996  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4997  case Match_InvalidSVEExactFPImmOperandZeroOne:
4998  case Match_MSR:
4999  case Match_MRS: {
5000  if (ErrorInfo >= Operands.size())
5001  return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5002  // Any time we get here, there's nothing fancy to do. Just get the
5003  // operand SMLoc and display the diagnostic.
5004  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5005  if (ErrorLoc == SMLoc())
5006  ErrorLoc = IDLoc;
5007  return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5008  }
5009  }
5010 
5011  llvm_unreachable("Implement any new match types added!");
5012 }
5013 
5014 /// ParseDirective parses the arm specific directives
// Dispatches AArch64-specific assembler directives to their handlers. Returns
// false when the directive was recognised and consumed here; true hands the
// directive back to the generic AsmParser.
5015 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
// NOTE(review): the declaration line for 'Format' (rendered line 5016) was
// elided by the doxygen rendering; 'Format' presumably holds the object-file
// type queried on the next line -- confirm against the original source.
5017  getContext().getObjectFileInfo()->getObjectFileType();
5018  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5019 
5020  StringRef IDVal = DirectiveID.getIdentifier();
5021  SMLoc Loc = DirectiveID.getLoc();
5022  if (IDVal == ".arch")
5023  parseDirectiveArch(Loc);
5024  else if (IDVal == ".cpu")
5025  parseDirectiveCPU(Loc);
5026  else if (IDVal == ".tlsdesccall")
5027  parseDirectiveTLSDescCall(Loc);
5028  else if (IDVal == ".ltorg" || IDVal == ".pool")
5029  parseDirectiveLtorg(Loc);
5030  else if (IDVal == ".unreq")
5031  parseDirectiveUnreq(Loc);
5032  else if (IDVal == ".inst")
5033  parseDirectiveInst(Loc);
5034  else if (IDVal == ".cfi_negate_ra_state")
5035  parseDirectiveCFINegateRAState();
5036  else if (IDVal == ".cfi_b_key_frame")
5037  parseDirectiveCFIBKeyFrame();
5038  else if (IDVal == ".arch_extension")
5039  parseDirectiveArchExtension(Loc);
// The LOH (Linker Optimization Hint) directive is only meaningful for MachO.
5040  else if (IsMachO) {
5041  if (IDVal == MCLOHDirectiveName())
5042  parseDirectiveLOH(IDVal, Loc);
5043  else
5044  return true;
5045  } else
5046  return true;
5047  return false;
5048 }
5049 
// Expand a "+crypto"/"+nocrypto" request into the concrete per-algorithm
// extensions it stands for on the given architecture ("crypto" means
// sha2+aes pre-v8.4, and additionally sm4+sha3 from v8.4 onward).
// NOTE(review): the doxygen rendering elided the first line of this function's
// signature (rendered line 5050) -- presumably
// "static void ExpandCryptoAEK(AArch64::ArchKind ArchKind," given the
// ArchKind switch below; confirm against the original source.
5051  SmallVector<StringRef, 4> &RequestedExtensions) {
// "nocrypto" wins over "crypto" when both were requested (see the !NoCrypto
// test below).
5052  const bool NoCrypto =
5053  (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5054  "nocrypto") != std::end(RequestedExtensions));
5055  const bool Crypto =
5056  (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5057  "crypto") != std::end(RequestedExtensions));
5058 
5059  if (!NoCrypto && Crypto) {
5060  switch (ArchKind) {
5061  default:
5062  // Map 'generic' (and others) to sha2 and aes, because
5063  // that was the traditional meaning of crypto.
5064  case AArch64::ArchKind::ARMV8_1A:
5065  case AArch64::ArchKind::ARMV8_2A:
5066  case AArch64::ArchKind::ARMV8_3A:
5067  RequestedExtensions.push_back("sha2");
5068  RequestedExtensions.push_back("aes");
5069  break;
5070  case AArch64::ArchKind::ARMV8_4A:
5071  case AArch64::ArchKind::ARMV8_5A:
5072  RequestedExtensions.push_back("sm4");
5073  RequestedExtensions.push_back("sha3");
5074  RequestedExtensions.push_back("sha2");
5075  RequestedExtensions.push_back("aes");
5076  break;
5077  }
5078  } else if (NoCrypto) {
5079  switch (ArchKind) {
5080  default:
5081  // Map 'generic' (and others) to sha2 and aes, because
5082  // that was the traditional meaning of crypto.
5083  case AArch64::ArchKind::ARMV8_1A:
5084  case AArch64::ArchKind::ARMV8_2A:
5085  case AArch64::ArchKind::ARMV8_3A:
5086  RequestedExtensions.push_back("nosha2");
5087  RequestedExtensions.push_back("noaes");
5088  break;
5089  case AArch64::ArchKind::ARMV8_4A:
5090  case AArch64::ArchKind::ARMV8_5A:
5091  RequestedExtensions.push_back("nosm4");
5092  RequestedExtensions.push_back("nosha3");
5093  RequestedExtensions.push_back("nosha2");
5094  RequestedExtensions.push_back("noaes");
5095  break;
5096  }
5097  }
5098 }
5099 
5100 /// parseDirectiveArch
5101 /// ::= .arch token
// Handle ".arch <name>[+ext...]": switch the subtarget to the named
// architecture's default features, then toggle any requested extensions.
// Returns true on error.
5102 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5103  SMLoc ArchLoc = getLoc();
5104 
5105  StringRef Arch, ExtensionString;
5106  std::tie(Arch, ExtensionString) =
5107  getParser().parseStringToEndOfStatement().trim().split('+');
5108 
// NOTE(review): rendered line 5109 was elided by the doxygen rendering; it
// presumably computed 'ID' (an AArch64::ArchKind) from 'Arch' -- confirm
// against the original source.
5110  if (ID == AArch64::ArchKind::INVALID)
5111  return Error(ArchLoc, "unknown arch name");
5112 
5113  if (parseToken(AsmToken::EndOfStatement))
5114  return true;
5115 
5116  // Get the architecture and extension features.
5117  std::vector<StringRef> AArch64Features;
5118  AArch64::getArchFeatures(ID, AArch64Features);
// NOTE(review): rendered line 5119 (the start of a second feature-gathering
// call whose argument list continues on the next line) was elided by the
// rendering.
5120  AArch64Features);
5121 
5122  MCSubtargetInfo &STI = copySTI();
5123  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5124  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5125 
5126  SmallVector<StringRef, 4> RequestedExtensions;
5127  if (!ExtensionString.empty())
5128  ExtensionString.split(RequestedExtensions, '+');
5129 
5130  ExpandCryptoAEK(ID, RequestedExtensions);
5131 
// NOTE(review): rendered line 5132, presumably declaring 'Features' (the
// current feature bitset read inside the loop), was elided by the rendering.
5133  for (auto Name : RequestedExtensions) {
5134  bool EnableFeature = true;
5135 
// A leading "no" requests disabling the extension rather than enabling it.
5136  if (Name.startswith_lower("no")) {
5137  EnableFeature = false;
5138  Name = Name.substr(2);
5139  }
5140 
5141  for (const auto &Extension : ExtensionMap) {
5142  if (Extension.Name != Name)
5143  continue;
5144 
5145  if (Extension.Features.none())
5146  report_fatal_error("unsupported architectural extension: " + Name);
5147 
// Compute only the feature bits that actually change, toggle them on the
// subtarget, then refresh the matcher's available-features mask.
5148  FeatureBitset ToggleFeatures = EnableFeature
5149  ? (~Features & Extension.Features)
5150  : ( Features & Extension.Features);
5151  uint64_t Features =
5152  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5153  setAvailableFeatures(Features);
5154  break;
5155  }
5156  }
5157  return false;
5158 }
5159 
5160 /// parseDirectiveArchExtension
5161 /// ::= .arch_extension [no]feature
// Toggle a single architectural extension on the current subtarget.
// Returns true on error.
5162 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5163  MCAsmParser &Parser = getParser();
5164 
5165  if (getLexer().isNot(AsmToken::Identifier))
5166  return Error(getLexer().getLoc(), "expected architecture extension name");
5167 
5168  const AsmToken &Tok = Parser.getTok();
5169  StringRef Name = Tok.getString();
5170  SMLoc ExtLoc = Tok.getLoc();
5171  Lex();
5172 
5173  if (parseToken(AsmToken::EndOfStatement,
5174  "unexpected token in '.arch_extension' directive"))
5175  return true;
5176 
// A leading "no" disables the extension instead of enabling it.
5177  bool EnableFeature = true;
5178  if (Name.startswith_lower("no")) {
5179  EnableFeature = false;
5180  Name = Name.substr(2);
5181  }
5182 
5183  MCSubtargetInfo &STI = copySTI();
// NOTE(review): rendered line 5184, presumably declaring 'Features' (the
// current feature bitset read below), was elided by the doxygen rendering.
5185  for (const auto &Extension : ExtensionMap) {
5186  if (Extension.Name != Name)
5187  continue;
5188 
5189  if (Extension.Features.none())
5190  return Error(ExtLoc, "unsupported architectural extension: " + Name);
5191 
// Toggle only the feature bits that change, then refresh the matcher's
// available-features mask.
5192  FeatureBitset ToggleFeatures = EnableFeature
5193  ? (~Features & Extension.Features)
5194  : (Features & Extension.Features);
5195  uint64_t Features =
5196  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5197  setAvailableFeatures(Features);
5198  return false;
5199  }
5200 
5201  return Error(ExtLoc, "unknown architectural extension: " + Name);
5202 }
5203 
5204 static SMLoc incrementLoc(SMLoc L, int Offset) {
5205  return SMLoc::getFromPointer(L.getPointer() + Offset);
5206 }
5207 
5208 /// parseDirectiveCPU
5209 /// ::= .cpu id
// Handle ".cpu <name>[+ext...]": switch the subtarget to the named CPU's
// default features, then toggle any requested extensions. Returns true only
// on a hard parse error; unknown CPU/extension names are diagnosed but
// parsing continues.
5210 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5211  SMLoc CurLoc = getLoc();
5212 
5213  StringRef CPU, ExtensionString;
5214  std::tie(CPU, ExtensionString) =
5215  getParser().parseStringToEndOfStatement().trim().split('+');
5216 
5217  if (parseToken(AsmToken::EndOfStatement))
5218  return true;
5219 
5220  SmallVector<StringRef, 4> RequestedExtensions;
5221  if (!ExtensionString.empty())
5222  ExtensionString.split(RequestedExtensions, '+');
5223 
5224  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5225  // once that is tablegen'ed
5226  if (!getSTI().isCPUStringValid(CPU)) {
// Diagnose but deliberately return false: an unknown CPU name should not
// abort assembly of the rest of the file.
5227  Error(CurLoc, "unknown CPU name");
5228  return false;
5229  }
5230 
5231  MCSubtargetInfo &STI = copySTI();
5232  STI.setDefaultFeatures(CPU, "");
// Track the source location as we walk the "+ext" list so diagnostics point
// at the offending extension name.
5233  CurLoc = incrementLoc(CurLoc, CPU.size());
5234 
5235  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5236 
// NOTE(review): rendered line 5237, presumably declaring 'Features' (the
// current feature bitset read inside the loop), was elided by the doxygen
// rendering.
5238  for (auto Name : RequestedExtensions) {
5239  // Advance source location past '+'.
5240  CurLoc = incrementLoc(CurLoc, 1);
5241 
5242  bool EnableFeature = true;
5243 
// A leading "no" disables the extension instead of enabling it.
5244  if (Name.startswith_lower("no")) {
5245  EnableFeature = false;
5246  Name = Name.substr(2);
5247  }
5248 
5249  bool FoundExtension = false;
5250  for (const auto &Extension : ExtensionMap) {
5251  if (Extension.Name != Name)
5252  continue;
5253 
5254  if (Extension.Features.none())
5255  report_fatal_error("unsupported architectural extension: " + Name);
5256 
// Toggle only the feature bits that change, then refresh the matcher's
// available-features mask.
5257  FeatureBitset ToggleFeatures = EnableFeature
5258  ? (~Features & Extension.Features)
5259  : ( Features & Extension.Features);
5260  uint64_t Features =
5261  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5262  setAvailableFeatures(Features);
5263  FoundExtension = true;
5264 
5265  break;
5266  }
5267 
5268  if (!FoundExtension)
5269  Error(CurLoc, "unsupported architectural extension");
5270 
5271  CurLoc = incrementLoc(CurLoc, Name.size());
5272  }
5273  return false;
5274 }
5275 
5276 /// parseDirectiveInst
5277 /// ::= .inst opcode [, ...]
5278 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5279  if (getLexer().is(AsmToken::EndOfStatement))
5280  return Error(Loc, "expected expression following '.inst' directive");
5281 
5282  auto parseOp = [&]() -> bool {
5283  SMLoc L = getLoc();
5284  const MCExpr *Expr;
5285  if (check(getParser().parseExpression(Expr), L, "expected expression"))
5286  return true;
5287  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5288  if (check(!Value, L, "expected constant expression"))
5289  return true;
5290  getTargetStreamer().emitInst(Value->getValue());
5291  return false;
5292  };
5293 
5294  if (parseMany(parseOp))
5295  return addErrorSuffix(" in '.inst' directive");
5296  return false;
5297 }
5298 
5299 // parseDirectiveTLSDescCall:
5300 // ::= .tlsdesccall symbol
5301 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5302  StringRef Name;
5303  if (check(getParser().parseIdentifier(Name), L,
5304  "expected symbol after directive") ||
5305  parseToken(AsmToken::EndOfStatement))
5306  return true;
5307 
5308  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5309  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5310  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5311 
5312  MCInst Inst;
5313  Inst.setOpcode(AArch64::TLSDESCCALL);
5314  Inst.addOperand(MCOperand::createExpr(Expr));
5315 
5316  getParser().getStreamer().EmitInstruction(Inst, getSTI());
5317  return false;
5318 }
5319 
5320 /// ::= .loh <lohName | lohId> label1, ..., labelN
5321 /// The number of arguments depends on the loh identifier.
5322 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5323  MCLOHType Kind;
// The LOH kind may be given either numerically or by name.
5324  if (getParser().getTok().isNot(AsmToken::Identifier)) {
5325  if (getParser().getTok().isNot(AsmToken::Integer))
5326  return TokError("expected an identifier or a number in directive")
5327  // We successfully get a numeric value for the identifier.
5328  // Check if it is valid.
5329  int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): 'Id <= -1U' compares a signed 64-bit value against an
// unsigned int literal; the intent is presumably an upper-bound range check
// before the validity test -- worth confirming against isValidMCLOHType.
5330  if (Id <= -1U && !isValidMCLOHType(Id))
5331  return TokError("invalid numeric identifier in directive");
5332  Kind = (MCLOHType)Id;
5333  } else {
5334  StringRef Name = getTok().getIdentifier();
5335  // We successfully parse an identifier.
5336  // Check if it is a recognized one.
5337  int Id = MCLOHNameToId(Name);
5338 
5339  if (Id == -1)
5340  return TokError("invalid identifier in directive");
5341  Kind = (MCLOHType)Id;
5342  }
5343  // Consume the identifier.
5344  Lex();
5345  // Get the number of arguments of this LOH.
5346  int NbArgs = MCLOHIdToNbArgs(Kind);
5347 
5348  assert(NbArgs != -1 && "Invalid number of arguments");
5349 
// NOTE(review): rendered line 5350, presumably declaring the 'Args' symbol
// list populated below, was elided by the doxygen rendering.
5351  for (int Idx = 0; Idx < NbArgs; ++Idx) {
5352  StringRef Name;
5353  if (getParser().parseIdentifier(Name))
5354  return TokError("expected identifier in directive");
5355  Args.push_back(getContext().getOrCreateSymbol(Name));
5356 
// The last label is not followed by a comma.
5357  if (Idx + 1 == NbArgs)
5358  break;
5359  if (parseToken(AsmToken::Comma,
5360  "unexpected token in '" + Twine(IDVal) + "' directive"))
5361  return true;
5362  }
5363  if (parseToken(AsmToken::EndOfStatement,
5364  "unexpected token in '" + Twine(IDVal) + "' directive"))
5365  return true;
5366 
5367  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5368  return false;
5369 }
5370 
5371 /// parseDirectiveLtorg
5372 /// ::= .ltorg | .pool
5373 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5374  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5375  return true;
5376  getTargetStreamer().emitCurrentConstantPool();
5377  return false;
5378 }
5379 
5380 /// parseDirectiveReq
5381 /// ::= name .req registername
5382 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5383  MCAsmParser &Parser = getParser();
5384  Parser.Lex(); // Eat the '.req' token.
5385  SMLoc SRegLoc = getLoc();
5387  unsigned RegNum;
5388  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5389 
5390  if (ParseRes != MatchOperand_Success) {
5391  StringRef Kind;
5392  RegisterKind = RegKind::NeonVector;
5393  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5394 
5395  if (ParseRes == MatchOperand_ParseFail)
5396  return true;
5397 
5398  if (ParseRes == MatchOperand_Success && !Kind.empty())
5399  return Error(SRegLoc, "vector register without type specifier expected");
5400  }
5401 
5402  if (ParseRes != MatchOperand_Success) {
5403  StringRef Kind;
5404  RegisterKind = RegKind::SVEDataVector;
5405  ParseRes =
5406  tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5407 
5408  if (ParseRes == MatchOperand_ParseFail)
5409  return true;
5410 
5411  if (ParseRes == MatchOperand_Success && !Kind.empty())
5412  return Error(SRegLoc,
5413  "sve vector register without type specifier expected");
5414  }
5415 
5416  if (ParseRes != MatchOperand_Success) {
5417  StringRef Kind;
5418  RegisterKind = RegKind::SVEPredicateVector;
5419  ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5420 
5421  if (ParseRes ==