//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
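  // NextPrefix caches the most recently parsed MOVPRFX so that
  // validateInstruction() can check the instruction that follows it against
  // the prefix's destination register, element size, and governing predicate.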

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseMSRSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
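  // For example, in "ldr x0, [x1, w2, sxtw #3]" the "sxtw #3" is parsed into
  // the ShiftExtend field of the w2 register operand instead of becoming a
  // separate shift/extend operand.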

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }

  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
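  // For example, with Scale == 4 (32-bit LDR/STR) the legal immediate
  // offsets are 0, 4, 8, ..., 16380: non-negative multiples of the scale
  // that still fit the 12-bit unsigned field after scaling.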

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
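  // For example, with T = int32_t the value 0xffffffff000000ff is accepted:
  // the upper 32 bits are all ones, so it is validated as the 32-bit logical
  // immediate 0xff (a contiguous run of 8 set bits).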

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
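  // For example, with Width == 12 the immediate 0x123000 yields
  // (0x123, 12), while the unshifted immediate 0x123 yields (0x123, 0).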

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
  }
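  // For example, isBranchTarget<26> accepts any word-aligned offset in
  // [-2^27, 2^27 - 4], the range encodable in a 26-bit branch immediate.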

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return true;
    }

    return false;
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }
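  // For example, "mov x0, #0x12340000" satisfies isMOVZMovAlias<64, 16>:
  // the constant is a 16-bit chunk left-shifted by 16, so the mov can be
  // encoded as a MOVZ.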

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NoMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
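  // For example, "ldr x0, [x1, #1]" cannot use the scaled 64-bit LDR
  // encoding (the offset must be a multiple of 8), but #1 is a legal 9-bit
  // signed offset, so the fallback operand matches and the instruction is
  // emitted as LDUR.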

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
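  // The bounds above make ADRP accept page-aligned offsets within roughly
  // +/-4GiB of the current page: a signed 21-bit count of 4KiB pages.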

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
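  // For example, the NEON list "{ v2.8b, v3.8b }" is a two-register DReg
  // list whose start register is recorded relative to Q0, so the operand
  // emitted is the D2_D3 tuple: FirstRegs[DReg][2] (D0_D1) advanced by the
  // distance from Q0 to Q2.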

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
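  // This supports aliases with negated immediates: for example,
  // "add w0, w1, #-10" matches the negated-immediate operand class of SUB
  // and is encoded as "sub w0, w1, #10" (note the negation in createImm
  // above).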

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
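  // For example, with Scale == 8, "ldr x0, [x1, #16]" emits the scaled
  // value 2 into the instruction's 12-bit unsigned offset field.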

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }

  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }

  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1796 
1797  void addExtendOperands(MCInst &Inst, unsigned N) const {
1798  assert(N == 1 && "Invalid number of operands!");
1799  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1800  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1801  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1802  Inst.addOperand(MCOperand::createImm(Imm));
1803  }
1804 
1805  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1806  assert(N == 1 && "Invalid number of operands!");
1807  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1808  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1809  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1810  Inst.addOperand(MCOperand::createImm(Imm));
1811  }
1812 
1813  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1814  assert(N == 2 && "Invalid number of operands!");
1815  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1816  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1817  Inst.addOperand(MCOperand::createImm(IsSigned));
1818  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1819  }
1820 
1821  // For 8-bit load/store instructions with a register offset, both the
1822  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1823  // they're disambiguated by whether the shift was explicit or implicit rather
1824  // than its size.
1825  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1826  assert(N == 2 && "Invalid number of operands!");
1827  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1828  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1829  Inst.addOperand(MCOperand::createImm(IsSigned));
1830  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1831  }
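  // [Editorial note, not in the original source] Illustrative example:
  // "ldrb w0, [x1, x2, lsl #0]" spells the shift explicitly and selects the
  // "DoShift" variant, while "ldrb w0, [x1, x2]" leaves it implicit and
  // selects "NoShift"; both encode a shift amount of 0.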
1832 
1833  template<int Shift>
1834  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1835  assert(N == 1 && "Invalid number of operands!");
1836 
1837  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1838  if (CE) {
1839  uint64_t Value = CE->getValue();
1840  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1841  } else {
1842  addExpr(Inst, getImm());
1843  }
1844  }
1845 
1846  template<int Shift>
1847  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1848  assert(N == 1 && "Invalid number of operands!");
1849 
1850  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1851  uint64_t Value = CE->getValue();
1852  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1853  }
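  // [Editorial note, not in the original source] Assuming the standard MOV
  // alias behaviour: "mov w0, #0x10000" matches the MOVZ alias with
  // Shift == 16 and adds (0x10000 >> 16) & 0xffff == 1, while "mov x0, #-2"
  // matches the MOVN alias with Shift == 0 and adds (~(-2) >> 0) & 0xffff == 1.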
1854 
1855  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1856  assert(N == 1 && "Invalid number of operands!");
1857  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1858  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1859  }
1860 
1861  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1862  assert(N == 1 && "Invalid number of operands!");
1863  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1864  Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1865  }
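  // [Editorial note, not in the original source] The "even" form serves
  // FCMLA-style rotations (0/90/180/270 encode to 0..3 via value/90); the
  // "odd" form serves FCADD-style rotations (90/270 encode to 0..1 via
  // (value - 90) / 180).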
1866 
1867  void print(raw_ostream &OS) const override;
1868 
1869  static std::unique_ptr<AArch64Operand>
1870  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1871  auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1872  Op->Tok.Data = Str.data();
1873  Op->Tok.Length = Str.size();
1874  Op->Tok.IsSuffix = IsSuffix;
1875  Op->StartLoc = S;
1876  Op->EndLoc = S;
1877  return Op;
1878  }
1879 
1880  static std::unique_ptr<AArch64Operand>
1881  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1882  RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1883  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1884  unsigned ShiftAmount = 0,
1885  unsigned HasExplicitAmount = false) {
1886  auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1887  Op->Reg.RegNum = RegNum;
1888  Op->Reg.Kind = Kind;
1889  Op->Reg.ElementWidth = 0;
1890  Op->Reg.EqualityTy = EqTy;
1891  Op->Reg.ShiftExtend.Type = ExtTy;
1892  Op->Reg.ShiftExtend.Amount = ShiftAmount;
1893  Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1894  Op->StartLoc = S;
1895  Op->EndLoc = E;
1896  return Op;
1897  }
1898 
1899  static std::unique_ptr<AArch64Operand>
1900  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1901  SMLoc S, SMLoc E, MCContext &Ctx,
1902  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1903  unsigned ShiftAmount = 0,
1904  unsigned HasExplicitAmount = false) {
1905  assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1906  Kind == RegKind::SVEPredicateVector) &&
1907  "Invalid vector kind");
1908  auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1909  HasExplicitAmount);
1910  Op->Reg.ElementWidth = ElementWidth;
1911  return Op;
1912  }
1913 
1914  static std::unique_ptr<AArch64Operand>
1915  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1916  unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1917  MCContext &Ctx) {
1918  auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1919  Op->VectorList.RegNum = RegNum;
1920  Op->VectorList.Count = Count;
1921  Op->VectorList.NumElements = NumElements;
1922  Op->VectorList.ElementWidth = ElementWidth;
1923  Op->VectorList.RegisterKind = RegisterKind;
1924  Op->StartLoc = S;
1925  Op->EndLoc = E;
1926  return Op;
1927  }
1928 
1929  static std::unique_ptr<AArch64Operand>
1930  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1931  auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1932  Op->VectorIndex.Val = Idx;
1933  Op->StartLoc = S;
1934  Op->EndLoc = E;
1935  return Op;
1936  }
1937 
1938  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1939  SMLoc E, MCContext &Ctx) {
1940  auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1941  Op->Imm.Val = Val;
1942  Op->StartLoc = S;
1943  Op->EndLoc = E;
1944  return Op;
1945  }
1946 
1947  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1948  unsigned ShiftAmount,
1949  SMLoc S, SMLoc E,
1950  MCContext &Ctx) {
1951  auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1952  Op->ShiftedImm.Val = Val;
1953  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1954  Op->StartLoc = S;
1955  Op->EndLoc = E;
1956  return Op;
1957  }
1958 
1959  static std::unique_ptr<AArch64Operand>
1960  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1961  auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1962  Op->CondCode.Code = Code;
1963  Op->StartLoc = S;
1964  Op->EndLoc = E;
1965  return Op;
1966  }
1967 
1968  static std::unique_ptr<AArch64Operand>
1969  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1970  auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
1971  Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1972  Op->FPImm.IsExact = IsExact;
1973  Op->StartLoc = S;
1974  Op->EndLoc = S;
1975  return Op;
1976  }
1977 
1978  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1979  StringRef Str,
1980  SMLoc S,
1981  MCContext &Ctx,
1982  bool HasnXSModifier) {
1983  auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1984  Op->Barrier.Val = Val;
1985  Op->Barrier.Data = Str.data();
1986  Op->Barrier.Length = Str.size();
1987  Op->Barrier.HasnXSModifier = HasnXSModifier;
1988  Op->StartLoc = S;
1989  Op->EndLoc = S;
1990  return Op;
1991  }
1992 
1993  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1994  uint32_t MRSReg,
1995  uint32_t MSRReg,
1996  uint32_t PStateField,
1997  MCContext &Ctx) {
1998  auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1999  Op->SysReg.Data = Str.data();
2000  Op->SysReg.Length = Str.size();
2001  Op->SysReg.MRSReg = MRSReg;
2002  Op->SysReg.MSRReg = MSRReg;
2003  Op->SysReg.PStateField = PStateField;
2004  Op->StartLoc = S;
2005  Op->EndLoc = S;
2006  return Op;
2007  }
2008 
2009  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2010  SMLoc E, MCContext &Ctx) {
2011  auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2012  Op->SysCRImm.Val = Val;
2013  Op->StartLoc = S;
2014  Op->EndLoc = E;
2015  return Op;
2016  }
2017 
2018  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2019  StringRef Str,
2020  SMLoc S,
2021  MCContext &Ctx) {
2022  auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2023  Op->Prefetch.Val = Val;
2024  Op->Prefetch.Data = Str.data();
2025  Op->Prefetch.Length = Str.size();
2026  Op->StartLoc = S;
2027  Op->EndLoc = S;
2028  return Op;
2029  }
2030 
2031  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2032  StringRef Str,
2033  SMLoc S,
2034  MCContext &Ctx) {
2035  auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2036  Op->PSBHint.Val = Val;
2037  Op->PSBHint.Data = Str.data();
2038  Op->PSBHint.Length = Str.size();
2039  Op->StartLoc = S;
2040  Op->EndLoc = S;
2041  return Op;
2042  }
2043 
2044  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2045  StringRef Str,
2046  SMLoc S,
2047  MCContext &Ctx) {
2048  auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2049  Op->BTIHint.Val = Val << 1 | 32;
2050  Op->BTIHint.Data = Str.data();
2051  Op->BTIHint.Length = Str.size();
2052  Op->StartLoc = S;
2053  Op->EndLoc = S;
2054  return Op;
2055  }
2056 
2057  static std::unique_ptr<AArch64Operand>
2058  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2059  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2060  auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2061  Op->ShiftExtend.Type = ShOp;
2062  Op->ShiftExtend.Amount = Val;
2063  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2064  Op->StartLoc = S;
2065  Op->EndLoc = E;
2066  return Op;
2067  }
2068 };
2069 
2070 } // end anonymous namespace.
2071 
2072 void AArch64Operand::print(raw_ostream &OS) const {
2073  switch (Kind) {
2074  case k_FPImm:
2075  OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2076  if (!getFPImmIsExact())
2077  OS << " (inexact)";
2078  OS << ">";
2079  break;
2080  case k_Barrier: {
2081  StringRef Name = getBarrierName();
2082  if (!Name.empty())
2083  OS << "<barrier " << Name << ">";
2084  else
2085  OS << "<barrier invalid #" << getBarrier() << ">";
2086  break;
2087  }
2088  case k_Immediate:
2089  OS << *getImm();
2090  break;
2091  case k_ShiftedImm: {
2092  unsigned Shift = getShiftedImmShift();
2093  OS << "<shiftedimm ";
2094  OS << *getShiftedImmVal();
2095  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2096  break;
2097  }
2098  case k_CondCode:
2099  OS << "<condcode " << getCondCode() << ">";
2100  break;
2101  case k_VectorList: {
2102  OS << "<vectorlist ";
2103  unsigned Reg = getVectorListStart();
2104  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2105  OS << Reg + i << " ";
2106  OS << ">";
2107  break;
2108  }
2109  case k_VectorIndex:
2110  OS << "<vectorindex " << getVectorIndex() << ">";
2111  break;
2112  case k_SysReg:
2113  OS << "<sysreg: " << getSysReg() << '>';
2114  break;
2115  case k_Token:
2116  OS << "'" << getToken() << "'";
2117  break;
2118  case k_SysCR:
2119  OS << "c" << getSysCR();
2120  break;
2121  case k_Prefetch: {
2122  StringRef Name = getPrefetchName();
2123  if (!Name.empty())
2124  OS << "<prfop " << Name << ">";
2125  else
2126  OS << "<prfop invalid #" << getPrefetch() << ">";
2127  break;
2128  }
2129  case k_PSBHint:
2130  OS << getPSBHintName();
2131  break;
2132  case k_BTIHint:
2133  OS << getBTIHintName();
2134  break;
2135  case k_Register:
2136  OS << "<register " << getReg() << ">";
2137  if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2138  break;
2139  LLVM_FALLTHROUGH;
2140  case k_ShiftExtend:
2141  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2142  << getShiftExtendAmount();
2143  if (!hasShiftExtendAmount())
2144  OS << "<imp>";
2145  OS << '>';
2146  break;
2147  }
2148 }
2149 
2150 /// @name Auto-generated Match Functions
2151 /// {
2152 
2153 static unsigned MatchRegisterName(StringRef Name);
2154 
2155 /// }
2156 
2157 static unsigned MatchNeonVectorRegName(StringRef Name) {
2158  return StringSwitch<unsigned>(Name.lower())
2159  .Case("v0", AArch64::Q0)
2160  .Case("v1", AArch64::Q1)
2161  .Case("v2", AArch64::Q2)
2162  .Case("v3", AArch64::Q3)
2163  .Case("v4", AArch64::Q4)
2164  .Case("v5", AArch64::Q5)
2165  .Case("v6", AArch64::Q6)
2166  .Case("v7", AArch64::Q7)
2167  .Case("v8", AArch64::Q8)
2168  .Case("v9", AArch64::Q9)
2169  .Case("v10", AArch64::Q10)
2170  .Case("v11", AArch64::Q11)
2171  .Case("v12", AArch64::Q12)
2172  .Case("v13", AArch64::Q13)
2173  .Case("v14", AArch64::Q14)
2174  .Case("v15", AArch64::Q15)
2175  .Case("v16", AArch64::Q16)
2176  .Case("v17", AArch64::Q17)
2177  .Case("v18", AArch64::Q18)
2178  .Case("v19", AArch64::Q19)
2179  .Case("v20", AArch64::Q20)
2180  .Case("v21", AArch64::Q21)
2181  .Case("v22", AArch64::Q22)
2182  .Case("v23", AArch64::Q23)
2183  .Case("v24", AArch64::Q24)
2184  .Case("v25", AArch64::Q25)
2185  .Case("v26", AArch64::Q26)
2186  .Case("v27", AArch64::Q27)
2187  .Case("v28", AArch64::Q28)
2188  .Case("v29", AArch64::Q29)
2189  .Case("v30", AArch64::Q30)
2190  .Case("v31", AArch64::Q31)
2191  .Default(0);
2192 }
2193 
2194 /// Returns an optional pair of (#elements, element-width) if Suffix
2195 /// is a valid vector kind. Where the number of elements in a vector
2196 /// or the vector width is implicit or explicitly unknown (but still a
2197 /// valid suffix kind), 0 is used.
2198 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2199  RegKind VectorKind) {
2200  std::pair<int, int> Res = {-1, -1};
2201 
2202  switch (VectorKind) {
2203  case RegKind::NeonVector:
2204  Res =
2205  StringSwitch<std::pair<int, int>>(Suffix.lower())
2206  .Case("", {0, 0})
2207  .Case(".1d", {1, 64})
2208  .Case(".1q", {1, 128})
2209  // '.2h' needed for fp16 scalar pairwise reductions
2210  .Case(".2h", {2, 16})
2211  .Case(".2s", {2, 32})
2212  .Case(".2d", {2, 64})
2213  // '.4b' is another special case for the ARMv8.2a dot product
2214  // operand
2215  .Case(".4b", {4, 8})
2216  .Case(".4h", {4, 16})
2217  .Case(".4s", {4, 32})
2218  .Case(".8b", {8, 8})
2219  .Case(".8h", {8, 16})
2220  .Case(".16b", {16, 8})
2221  // Accept the width neutral ones, too, for verbose syntax. If those
2222  // aren't used in the right places, the token operand won't match so
2223  // all will work out.
2224  .Case(".b", {0, 8})
2225  .Case(".h", {0, 16})
2226  .Case(".s", {0, 32})
2227  .Case(".d", {0, 64})
2228  .Default({-1, -1});
2229  break;
2230  case RegKind::SVEPredicateVector:
2231  case RegKind::SVEDataVector:
2232  Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2233  .Case("", {0, 0})
2234  .Case(".b", {0, 8})
2235  .Case(".h", {0, 16})
2236  .Case(".s", {0, 32})
2237  .Case(".d", {0, 64})
2238  .Case(".q", {0, 128})
2239  .Default({-1, -1});
2240  break;
2241  default:
2242  llvm_unreachable("Unsupported RegKind");
2243  }
2244 
2245  if (Res == std::make_pair(-1, -1))
2246  return Optional<std::pair<int, int>>();
2247 
2248  return Optional<std::pair<int, int>>(Res);
2249 }
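// [Editorial note, not in the original source] Examples: ".4s" yields
// {4, 32}, the width-neutral ".b" yields {0, 8}, and an unknown suffix such
// as ".3s" yields None.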
2250 
2251 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2252  return parseVectorKind(Suffix, VectorKind).hasValue();
2253 }
2254 
2255 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2256  return StringSwitch<unsigned>(Name.lower())
2257  .Case("z0", AArch64::Z0)
2258  .Case("z1", AArch64::Z1)
2259  .Case("z2", AArch64::Z2)
2260  .Case("z3", AArch64::Z3)
2261  .Case("z4", AArch64::Z4)
2262  .Case("z5", AArch64::Z5)
2263  .Case("z6", AArch64::Z6)
2264  .Case("z7", AArch64::Z7)
2265  .Case("z8", AArch64::Z8)
2266  .Case("z9", AArch64::Z9)
2267  .Case("z10", AArch64::Z10)
2268  .Case("z11", AArch64::Z11)
2269  .Case("z12", AArch64::Z12)
2270  .Case("z13", AArch64::Z13)
2271  .Case("z14", AArch64::Z14)
2272  .Case("z15", AArch64::Z15)
2273  .Case("z16", AArch64::Z16)
2274  .Case("z17", AArch64::Z17)
2275  .Case("z18", AArch64::Z18)
2276  .Case("z19", AArch64::Z19)
2277  .Case("z20", AArch64::Z20)
2278  .Case("z21", AArch64::Z21)
2279  .Case("z22", AArch64::Z22)
2280  .Case("z23", AArch64::Z23)
2281  .Case("z24", AArch64::Z24)
2282  .Case("z25", AArch64::Z25)
2283  .Case("z26", AArch64::Z26)
2284  .Case("z27", AArch64::Z27)
2285  .Case("z28", AArch64::Z28)
2286  .Case("z29", AArch64::Z29)
2287  .Case("z30", AArch64::Z30)
2288  .Case("z31", AArch64::Z31)
2289  .Default(0);
2290 }
2291 
2292 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2293  return StringSwitch<unsigned>(Name.lower())
2294  .Case("p0", AArch64::P0)
2295  .Case("p1", AArch64::P1)
2296  .Case("p2", AArch64::P2)
2297  .Case("p3", AArch64::P3)
2298  .Case("p4", AArch64::P4)
2299  .Case("p5", AArch64::P5)
2300  .Case("p6", AArch64::P6)
2301  .Case("p7", AArch64::P7)
2302  .Case("p8", AArch64::P8)
2303  .Case("p9", AArch64::P9)
2304  .Case("p10", AArch64::P10)
2305  .Case("p11", AArch64::P11)
2306  .Case("p12", AArch64::P12)
2307  .Case("p13", AArch64::P13)
2308  .Case("p14", AArch64::P14)
2309  .Case("p15", AArch64::P15)
2310  .Default(0);
2311 }
2312 
2313 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2314  SMLoc &EndLoc) {
2315  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2316 }
2317 
2318 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2319  SMLoc &StartLoc,
2320  SMLoc &EndLoc) {
2321  StartLoc = getLoc();
2322  auto Res = tryParseScalarRegister(RegNo);
2323  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2324  return Res;
2325 }
2326 
2327 // Matches a register name or register alias previously defined by '.req'
2328 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2329  RegKind Kind) {
2330  unsigned RegNum = 0;
2331  if ((RegNum = matchSVEDataVectorRegName(Name)))
2332  return Kind == RegKind::SVEDataVector ? RegNum : 0;
2333 
2334  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2335  return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2336 
2337  if ((RegNum = MatchNeonVectorRegName(Name)))
2338  return Kind == RegKind::NeonVector ? RegNum : 0;
2339 
2340  // The parsed register must be of RegKind Scalar
2341  if ((RegNum = MatchRegisterName(Name)))
2342  return Kind == RegKind::Scalar ? RegNum : 0;
2343 
2344  if (!RegNum) {
2345  // Handle a few common aliases of registers.
2346  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2347  .Case("fp", AArch64::FP)
2348  .Case("lr", AArch64::LR)
2349  .Case("x31", AArch64::XZR)
2350  .Case("w31", AArch64::WZR)
2351  .Default(0))
2352  return Kind == RegKind::Scalar ? RegNum : 0;
2353 
2354  // Check for aliases registered via .req. Canonicalize to lower case.
2355  // That's more consistent since register names are case insensitive, and
2356  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2357  auto Entry = RegisterReqs.find(Name.lower());
2358  if (Entry == RegisterReqs.end())
2359  return 0;
2360 
2361  // set RegNum if the match is the right kind of register
2362  if (Kind == Entry->getValue().first)
2363  RegNum = Entry->getValue().second;
2364  }
2365  return RegNum;
2366 }
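// [Editorial note, not in the original source] Examples: with
// Kind == RegKind::Scalar, "lr" resolves to AArch64::LR; after a directive
// such as "foo .req x0" ("foo" is a hypothetical alias name used only for
// illustration), "foo" resolves to the scalar register AArch64::X0.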
2367 
2368 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2369 /// Identifier when called, and if it is a register name the token is eaten and
2370 /// the register is added to the operand list.
2371 OperandMatchResultTy
2372 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2373  MCAsmParser &Parser = getParser();
2374  const AsmToken &Tok = Parser.getTok();
2375  if (Tok.isNot(AsmToken::Identifier))
2376  return MatchOperand_NoMatch;
2377 
2378  std::string lowerCase = Tok.getString().lower();
2379  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2380  if (Reg == 0)
2381  return MatchOperand_NoMatch;
2382 
2383  RegNum = Reg;
2384  Parser.Lex(); // Eat identifier token.
2385  return MatchOperand_Success;
2386 }
2387 
2388 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2389 OperandMatchResultTy
2390 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2391  MCAsmParser &Parser = getParser();
2392  SMLoc S = getLoc();
2393 
2394  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2395  Error(S, "Expected cN operand where 0 <= N <= 15");
2396  return MatchOperand_ParseFail;
2397  }
2398 
2399  StringRef Tok = Parser.getTok().getIdentifier();
2400  if (Tok[0] != 'c' && Tok[0] != 'C') {
2401  Error(S, "Expected cN operand where 0 <= N <= 15");
2402  return MatchOperand_ParseFail;
2403  }
2404 
2405  uint32_t CRNum;
2406  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2407  if (BadNum || CRNum > 15) {
2408  Error(S, "Expected cN operand where 0 <= N <= 15");
2409  return MatchOperand_ParseFail;
2410  }
2411 
2412  Parser.Lex(); // Eat identifier token.
2413  Operands.push_back(
2414  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2415  return MatchOperand_Success;
2416 }
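// [Editorial note, not in the original source] Example: "c7" is accepted and
// becomes CreateSysCR(7, ...), while "c16" is rejected with the range error
// above.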
2417 
2418 /// tryParsePrefetch - Try to parse a prefetch operand.
2419 template <bool IsSVEPrefetch>
2420 OperandMatchResultTy
2421 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2422  MCAsmParser &Parser = getParser();
2423  SMLoc S = getLoc();
2424  const AsmToken &Tok = Parser.getTok();
2425 
2426  auto LookupByName = [](StringRef N) {
2427  if (IsSVEPrefetch) {
2428  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2429  return Optional<unsigned>(Res->Encoding);
2430  } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2431  return Optional<unsigned>(Res->Encoding);
2432  return Optional<unsigned>();
2433  };
2434 
2435  auto LookupByEncoding = [](unsigned E) {
2436  if (IsSVEPrefetch) {
2437  if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2438  return Optional<StringRef>(Res->Name);
2439  } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2440  return Optional<StringRef>(Res->Name);
2441  return Optional<StringRef>();
2442  };
2443  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2444 
2445  // Either an identifier for named values or a 5-bit immediate.
2446  // Eat optional hash.
2447  if (parseOptionalToken(AsmToken::Hash) ||
2448  Tok.is(AsmToken::Integer)) {
2449  const MCExpr *ImmVal;
2450  if (getParser().parseExpression(ImmVal))
2451  return MatchOperand_ParseFail;
2452 
2453  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2454  if (!MCE) {
2455  TokError("immediate value expected for prefetch operand");
2456  return MatchOperand_ParseFail;
2457  }
2458  unsigned prfop = MCE->getValue();
2459  if (prfop > MaxVal) {
2460  TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2461  "] expected");
2462  return MatchOperand_ParseFail;
2463  }
2464 
2465  auto PRFM = LookupByEncoding(MCE->getValue());
2466  Operands.push_back(AArch64Operand::CreatePrefetch(
2467  prfop, PRFM.getValueOr(""), S, getContext()));
2468  return MatchOperand_Success;
2469  }
2470 
2471  if (Tok.isNot(AsmToken::Identifier)) {
2472  TokError("prefetch hint expected");
2473  return MatchOperand_ParseFail;
2474  }
2475 
2476  auto PRFM = LookupByName(Tok.getString());
2477  if (!PRFM) {
2478  TokError("prefetch hint expected");
2479  return MatchOperand_ParseFail;
2480  }
2481 
2482  Operands.push_back(AArch64Operand::CreatePrefetch(
2483  *PRFM, Tok.getString(), S, getContext()));
2484  Parser.Lex(); // Eat identifier token.
2485  return MatchOperand_Success;
2486 }
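// [Editorial note, not in the original source] Examples: in
// "prfm pldl1keep, [x0]" the hint resolves through LookupByName, while
// "prfm #5, [x0]" takes the immediate path (5 <= 31) and picks up the
// matching name, if any, via LookupByEncoding.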
2487 
2488 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2489 OperandMatchResultTy
2490 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2491  MCAsmParser &Parser = getParser();
2492  SMLoc S = getLoc();
2493  const AsmToken &Tok = Parser.getTok();
2494  if (Tok.isNot(AsmToken::Identifier)) {
2495  TokError("invalid operand for instruction");
2496  return MatchOperand_ParseFail;
2497  }
2498 
2499  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2500  if (!PSB) {
2501  TokError("invalid operand for instruction");
2502  return MatchOperand_ParseFail;
2503  }
2504 
2505  Operands.push_back(AArch64Operand::CreatePSBHint(
2506  PSB->Encoding, Tok.getString(), S, getContext()));
2507  Parser.Lex(); // Eat identifier token.
2508  return MatchOperand_Success;
2509 }
2510 
2511 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2512 OperandMatchResultTy
2513 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2514  MCAsmParser &Parser = getParser();
2515  SMLoc S = getLoc();
2516  const AsmToken &Tok = Parser.getTok();
2517  if (Tok.isNot(AsmToken::Identifier)) {
2518  TokError("invalid operand for instruction");
2519  return MatchOperand_ParseFail;
2520  }
2521 
2522  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2523  if (!BTI) {
2524  TokError("invalid operand for instruction");
2525  return MatchOperand_ParseFail;
2526  }
2527 
2528  Operands.push_back(AArch64Operand::CreateBTIHint(
2529  BTI->Encoding, Tok.getString(), S, getContext()));
2530  Parser.Lex(); // Eat identifier token.
2531  return MatchOperand_Success;
2532 }
2533 
2534 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2535 /// instruction.
2536 OperandMatchResultTy
2537 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2538  MCAsmParser &Parser = getParser();
2539  SMLoc S = getLoc();
2540  const MCExpr *Expr = nullptr;
2541 
2542  if (Parser.getTok().is(AsmToken::Hash)) {
2543  Parser.Lex(); // Eat hash token.
2544  }
2545 
2546  if (parseSymbolicImmVal(Expr))
2547  return MatchOperand_ParseFail;
2548 
2549  AArch64MCExpr::VariantKind ELFRefKind;
2550  MCSymbolRefExpr::VariantKind DarwinRefKind;
2551  int64_t Addend;
2552  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2553  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2554  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2555  // No modifier was specified at all; this is the syntax for an ELF basic
2556  // ADRP relocation (unfortunately).
2557  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE,
2558  getContext());
2559  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2560  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2561  Addend != 0) {
2562  Error(S, "gotpage label reference not allowed an addend");
2563  return MatchOperand_ParseFail;
2564  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2565  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2566  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2567  ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
2568  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2569  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
2570  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2571  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2572  // The operand must be an @page or @gotpage qualified symbolref.
2573  Error(S, "page or gotpage label reference expected");
2574  return MatchOperand_ParseFail;
2575  }
2576  }
2577 
2578  // We have either a label reference possibly with addend or an immediate. The
2579  // addend is a raw value here. The linker will adjust it to only reference the
2580  // page.
2581  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2582  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2583 
2584  return MatchOperand_Success;
2585 }
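// [Editorial note, not in the original source] Examples: "adrp x0, var"
// carries no modifier and is wrapped as VK_ABS_PAGE above, while
// "adrp x0, :got:var" parses as a VK_GOT_PAGE reference and passes the
// modifier check.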
2586 
2587 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2588 /// instruction.
2589 OperandMatchResultTy
2590 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2591  SMLoc S = getLoc();
2592  const MCExpr *Expr = nullptr;
2593 
2594  // Leave anything with a bracket to the default for SVE
2595  if (getParser().getTok().is(AsmToken::LBrac))
2596  return MatchOperand_NoMatch;
2597 
2598  if (getParser().getTok().is(AsmToken::Hash))
2599  getParser().Lex(); // Eat hash token.
2600 
2601  if (parseSymbolicImmVal(Expr))
2602  return MatchOperand_ParseFail;
2603 
2604  AArch64MCExpr::VariantKind ELFRefKind;
2605  MCSymbolRefExpr::VariantKind DarwinRefKind;
2606  int64_t Addend;
2607  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2608  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2609  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2610  // No modifier was specified at all; this is the syntax for an ELF basic
2611  // ADR relocation (unfortunately).
2612  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2613  } else {
2614  Error(S, "unexpected adr label");
2615  return MatchOperand_ParseFail;
2616  }
2617  }
2618 
2619  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2620  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2621  return MatchOperand_Success;
2622 }
2623 
2624 /// tryParseFPImm - A floating point immediate expression operand.
2625 template<bool AddFPZeroAsLiteral>
2626 OperandMatchResultTy
2627 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2628  MCAsmParser &Parser = getParser();
2629  SMLoc S = getLoc();
2630 
2631  bool Hash = parseOptionalToken(AsmToken::Hash);
2632 
2633  // Handle negation, as that still comes through as a separate token.
2634  bool isNegative = parseOptionalToken(AsmToken::Minus);
2635 
2636  const AsmToken &Tok = Parser.getTok();
2637  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2638  if (!Hash)
2639  return MatchOperand_NoMatch;
2640  TokError("invalid floating point immediate");
2641  return MatchOperand_ParseFail;
2642  }
2643 
2644  // Parse hexadecimal representation.
2645  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2646  if (Tok.getIntVal() > 255 || isNegative) {
2647  TokError("encoded floating point value out of range");
2648  return MatchOperand_ParseFail;
2649  }
2650 
2651  APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2652  Operands.push_back(
2653  AArch64Operand::CreateFPImm(F, true, S, getContext()));
2654  } else {
2655  // Parse FP representation.
2656  APFloat RealVal(APFloat::IEEEdouble());
2657  auto StatusOrErr =
2658  RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2659  if (errorToBool(StatusOrErr.takeError())) {
2660  TokError("invalid floating point representation");
2661  return MatchOperand_ParseFail;
2662  }
2663 
2664  if (isNegative)
2665  RealVal.changeSign();
2666 
2667  if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2668  Operands.push_back(
2669  AArch64Operand::CreateToken("#0", false, S, getContext()));
2670  Operands.push_back(
2671  AArch64Operand::CreateToken(".0", false, S, getContext()));
2672  } else
2673  Operands.push_back(AArch64Operand::CreateFPImm(
2674  RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
2675  }
2676 
2677  Parser.Lex(); // Eat the token.
2678 
2679  return MatchOperand_Success;
2680 }
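// [Editorial note, not in the original source] Examples: "fmov d0, #1.0"
// takes the FP-representation path, while "fmov d0, #0x70" takes the
// hexadecimal path; assuming the usual AArch64 8-bit FP immediate encoding,
// 0x70 decodes to +1.0.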
2681 
2682 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2683 /// a shift suffix, for example '#1, lsl #12'.
2684 OperandMatchResultTy
2685 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2686  MCAsmParser &Parser = getParser();
2687  SMLoc S = getLoc();
2688 
2689  if (Parser.getTok().is(AsmToken::Hash))
2690  Parser.Lex(); // Eat '#'
2691  else if (Parser.getTok().isNot(AsmToken::Integer))
2692  // The operand should start with '#' or be an integer; otherwise there is no match.
2693  return MatchOperand_NoMatch;
2694 
2695  const MCExpr *Imm = nullptr;
2696  if (parseSymbolicImmVal(Imm))
2697  return MatchOperand_ParseFail;
2698  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2699  SMLoc E = Parser.getTok().getLoc();
2700  Operands.push_back(
2701  AArch64Operand::CreateImm(Imm, S, E, getContext()));
2702  return MatchOperand_Success;
2703  }
2704 
2705  // Eat ','
2706  Parser.Lex();
2707 
2708  // The optional operand must be "lsl #N" where N is non-negative.
2709  if (!Parser.getTok().is(AsmToken::Identifier) ||
2710  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2711  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2712  return MatchOperand_ParseFail;
2713  }
2714 
2715  // Eat 'lsl'
2716  Parser.Lex();
2717 
2718  parseOptionalToken(AsmToken::Hash);
2719 
2720  if (Parser.getTok().isNot(AsmToken::Integer)) {
2721  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2722  return MatchOperand_ParseFail;
2723  }
2724 
2725  int64_t ShiftAmount = Parser.getTok().getIntVal();
2726 
2727  if (ShiftAmount < 0) {
2728  Error(Parser.getTok().getLoc(), "positive shift amount required");
2729  return MatchOperand_ParseFail;
2730  }
2731  Parser.Lex(); // Eat the number
2732 
2733  // Just in case the optional lsl #0 is used for immediates other than zero.
2734  if (ShiftAmount == 0 && Imm != nullptr) {
2735  SMLoc E = Parser.getTok().getLoc();
2736  Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2737  return MatchOperand_Success;
2738  }
2739 
2740  SMLoc E = Parser.getTok().getLoc();
2741  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2742  S, E, getContext()));
2743  return MatchOperand_Success;
2744 }
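// [Editorial note, not in the original source] Examples: "#1, lsl #12"
// produces a shifted immediate (Val == 1, ShiftAmount == 12), while a plain
// "#4096" or "#1, lsl #0" produces an ordinary immediate operand.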
2745 
2746 /// parseCondCodeString - Parse a Condition Code string.
2747 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2749  .Case("eq", AArch64CC::EQ)
2750  .Case("ne", AArch64CC::NE)
2751  .Case("cs", AArch64CC::HS)
2752  .Case("hs", AArch64CC::HS)
2753  .Case("cc", AArch64CC::LO)
2754  .Case("lo", AArch64CC::LO)
2755  .Case("mi", AArch64CC::MI)
2756  .Case("pl", AArch64CC::PL)
2757  .Case("vs", AArch64CC::VS)
2758  .Case("vc", AArch64CC::VC)
2759  .Case("hi", AArch64CC::HI)
2760  .Case("ls", AArch64CC::LS)
2761  .Case("ge", AArch64CC::GE)
2762  .Case("lt", AArch64CC::LT)
2763  .Case("gt", AArch64CC::GT)
2764  .Case("le", AArch64CC::LE)
2765  .Case("al", AArch64CC::AL)
2766  .Case("nv", AArch64CC::NV)
2767  .Default(AArch64CC::Invalid);
2768 
2769  if (CC == AArch64CC::Invalid &&
2770  getSTI().getFeatureBits()[AArch64::FeatureSVE])
2772  .Case("none", AArch64CC::EQ)
2773  .Case("any", AArch64CC::NE)
2774  .Case("nlast", AArch64CC::HS)
2775  .Case("last", AArch64CC::LO)
2776  .Case("first", AArch64CC::MI)
2777  .Case("nfrst", AArch64CC::PL)
2778  .Case("pmore", AArch64CC::HI)
2779  .Case("plast", AArch64CC::LS)
2780  .Case("tcont", AArch64CC::GE)
2781  .Case("tstop", AArch64CC::LT)
2782  .Default(AArch64CC::Invalid);
2783 
2784  return CC;
2785 }
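// [Editorial note, not in the original source] Examples: "hs" and "cs" both
// map to AArch64CC::HS; with SVE enabled, the predicate alias "any" also
// resolves to AArch64CC::NE.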
2786 
2787 /// parseCondCode - Parse a Condition Code operand.
2788 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2789  bool invertCondCode) {
2790  MCAsmParser &Parser = getParser();
2791  SMLoc S = getLoc();
2792  const AsmToken &Tok = Parser.getTok();
2793  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2794 
2795  StringRef Cond = Tok.getString();
2796  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2797  if (CC == AArch64CC::Invalid)
2798  return TokError("invalid condition code");
2799  Parser.Lex(); // Eat identifier token.
2800 
2801  if (invertCondCode) {
2802  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2803  return TokError("condition codes AL and NV are invalid for this instruction");
2804  CC = AArch64CC::getInvertedCondCode(CC);
2805  }
2806 
2807  Operands.push_back(
2808  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2809  return false;
2810 }
2811 
2812 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2813 /// argument. Parse it if present.
2814 OperandMatchResultTy
2815 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2816  MCAsmParser &Parser = getParser();
2817  const AsmToken &Tok = Parser.getTok();
2818  std::string LowerID = Tok.getString().lower();
2821  .Case("lsl", AArch64_AM::LSL)
2822  .Case("lsr", AArch64_AM::LSR)
2823  .Case("asr", AArch64_AM::ASR)
2824  .Case("ror", AArch64_AM::ROR)
2825  .Case("msl", AArch64_AM::MSL)
2826  .Case("uxtb", AArch64_AM::UXTB)
2827  .Case("uxth", AArch64_AM::UXTH)
2828  .Case("uxtw", AArch64_AM::UXTW)
2829  .Case("uxtx", AArch64_AM::UXTX)
2830  .Case("sxtb", AArch64_AM::SXTB)
2831  .Case("sxth", AArch64_AM::SXTH)
2832  .Case("sxtw", AArch64_AM::SXTW)
2833  .Case("sxtx", AArch64_AM::SXTX)
2834  .Default(AArch64_AM::InvalidShiftExtend);
2835 
2836  if (ShOp == AArch64_AM::InvalidShiftExtend)
2837  return MatchOperand_NoMatch;
2838 
2839  SMLoc S = Tok.getLoc();
2840  Parser.Lex();
2841 
2842  bool Hash = parseOptionalToken(AsmToken::Hash);
2843 
2844  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2845  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2846  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2847  ShOp == AArch64_AM::MSL) {
2848  // We expect a number here.
2849  TokError("expected #imm after shift specifier");
2850  return MatchOperand_ParseFail;
2851  }
2852 
2853  // "extend" type operations don't need an immediate, #0 is implicit.
2854  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2855  Operands.push_back(
2856  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2857  return MatchOperand_Success;
2858  }
2859 
2860  // Make sure we do actually have a number, identifier or a parenthesized
2861  // expression.
2862  SMLoc E = Parser.getTok().getLoc();
2863  if (!Parser.getTok().is(AsmToken::Integer) &&
2864  !Parser.getTok().is(AsmToken::LParen) &&
2865  !Parser.getTok().is(AsmToken::Identifier)) {
2866  Error(E, "expected integer shift amount");
2867  return MatchOperand_ParseFail;
2868  }
2869 
2870  const MCExpr *ImmVal;
2871  if (getParser().parseExpression(ImmVal))
2872  return MatchOperand_ParseFail;
2873 
2874  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2875  if (!MCE) {
2876  Error(E, "expected constant '#imm' after shift specifier");
2877  return MatchOperand_ParseFail;
2878  }
2879 
2880  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2881  Operands.push_back(AArch64Operand::CreateShiftExtend(
2882  ShOp, MCE->getValue(), true, S, E, getContext()));
2883  return MatchOperand_Success;
2884 }
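// [Editorial note, not in the original source] Examples: "lsl #3" yields an
// explicit-amount shift operand; a bare "uxtw" yields an extend with an
// implicit amount of 0; a bare "lsl" is rejected with "expected #imm after
// shift specifier".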
2885 
2886 static const struct Extension {
2887  const char *Name;
2888  const FeatureBitset Features;
2889 } ExtensionMap[] = {
2890  {"crc", {AArch64::FeatureCRC}},
2891  {"sm4", {AArch64::FeatureSM4}},
2892  {"sha3", {AArch64::FeatureSHA3}},
2893  {"sha2", {AArch64::FeatureSHA2}},
2894  {"aes", {AArch64::FeatureAES}},
2895  {"crypto", {AArch64::FeatureCrypto}},
2896  {"fp", {AArch64::FeatureFPARMv8}},
2897  {"simd", {AArch64::FeatureNEON}},
2898  {"ras", {AArch64::FeatureRAS}},
2899  {"lse", {AArch64::FeatureLSE}},
2900  {"predres", {AArch64::FeaturePredRes}},
2901  {"ccdp", {AArch64::FeatureCacheDeepPersist}},
2902  {"mte", {AArch64::FeatureMTE}},
2903  {"memtag", {AArch64::FeatureMTE}},
2904  {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
2905  {"pan", {AArch64::FeaturePAN}},
2906  {"pan-rwv", {AArch64::FeaturePAN_RWV}},
2907  {"ccpp", {AArch64::FeatureCCPP}},
2908  {"rcpc", {AArch64::FeatureRCPC}},
2909  {"rng", {AArch64::FeatureRandGen}},
2910  {"sve", {AArch64::FeatureSVE}},
2911  {"sve2", {AArch64::FeatureSVE2}},
2912  {"sve2-aes", {AArch64::FeatureSVE2AES}},
2913  {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
2914  {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
2915  {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
2916  {"ls64", {AArch64::FeatureLS64}},
2917  {"xs", {AArch64::FeatureXS}},
2918  {"pauth", {AArch64::FeaturePAuth}},
2919  {"flagm", {AArch64::FeatureFlagM}},
2920  // FIXME: Unsupported extensions
2921  {"lor", {}},
2922  {"rdma", {}},
2923  {"profile", {}},
2924 };
2925 
2926 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2927  if (FBS[AArch64::HasV8_1aOps])
2928  Str += "ARMv8.1a";
2929  else if (FBS[AArch64::HasV8_2aOps])
2930  Str += "ARMv8.2a";
2931  else if (FBS[AArch64::HasV8_3aOps])
2932  Str += "ARMv8.3a";
2933  else if (FBS[AArch64::HasV8_4aOps])
2934  Str += "ARMv8.4a";
2935  else if (FBS[AArch64::HasV8_5aOps])
2936  Str += "ARMv8.5a";
2937  else if (FBS[AArch64::HasV8_6aOps])
2938  Str += "ARMv8.6a";
2939  else if (FBS[AArch64::HasV8_7aOps])
2940  Str += "ARMv8.7a";
2941  else {
2942  SmallVector<std::string, 2> ExtMatches;
2943  for (const auto& Ext : ExtensionMap) {
2944  // Use & in case multiple features are enabled
2945  if ((FBS & Ext.Features) != FeatureBitset())
2946  ExtMatches.push_back(Ext.Name);
2947  }
2948  Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
2949  }
2950 }
2951 
2952 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2953  SMLoc S) {
2954  const uint16_t Op2 = Encoding & 7;
2955  const uint16_t Cm = (Encoding & 0x78) >> 3;
2956  const uint16_t Cn = (Encoding & 0x780) >> 7;
2957  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2958 
2959  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2960 
2961  Operands.push_back(
2962  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2963  Operands.push_back(
2964  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2965  Operands.push_back(
2966  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2967  Expr = MCConstantExpr::create(Op2, getContext());
2968  Operands.push_back(
2969  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2970 }
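// [Editorial note, not in the original source] Field layout assumed by the
// masks above: Encoding = op1[13:11] : CRn[10:7] : CRm[6:3] : op2[2:0].
// For example, "ic ialluis" (op1=0, CRn=c7, CRm=c1, op2=0) expands to
// "sys #0, c7, c1, #0".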
2971 
2972 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2973 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2974 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2975  OperandVector &Operands) {
2976  if (Name.find('.') != StringRef::npos)
2977  return TokError("invalid operand");
2978 
2979  Mnemonic = Name;
2980  Operands.push_back(
2981  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2982 
2983  MCAsmParser &Parser = getParser();
2984  const AsmToken &Tok = Parser.getTok();
2985  StringRef Op = Tok.getString();
2986  SMLoc S = Tok.getLoc();
2987 
2988  if (Mnemonic == "ic") {
2989  const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2990  if (!IC)
2991  return TokError("invalid operand for IC instruction");
2992  else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2993  std::string Str("IC " + std::string(IC->Name) + " requires: ");
2994  setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2995  return TokError(Str.c_str());
2996  }
2997  createSysAlias(IC->Encoding, Operands, S);
2998  } else if (Mnemonic == "dc") {
2999  const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3000  if (!DC)
3001  return TokError("invalid operand for DC instruction");
3002  else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3003  std::string Str("DC " + std::string(DC->Name) + " requires: ");
3004  setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3005  return TokError(Str.c_str());
3006  }
3007  createSysAlias(DC->Encoding, Operands, S);
3008  } else if (Mnemonic == "at") {
3009  const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3010  if (!AT)
3011  return TokError("invalid operand for AT instruction");
3012  else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3013  std::string Str("AT " + std::string(AT->Name) + " requires: ");
3014  setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3015  return TokError(Str.c_str());
3016  }
3017  createSysAlias(AT->Encoding, Operands, S);
3018  } else if (Mnemonic == "tlbi") {
3019  const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3020  if (!TLBI)
3021  return TokError("invalid operand for TLBI instruction");
3022  else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3023  std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3024  setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3025  return TokError(Str.c_str());
3026  }
3027  createSysAlias(TLBI->Encoding, Operands, S);
3028  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
3029  const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
3030  if (!PRCTX)
3031  return TokError("invalid operand for prediction restriction instruction");
3032  else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
3033  std::string Str(
3034  Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
3035  setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
3036  return TokError(Str.c_str());
3037  }
3038  uint16_t PRCTX_Op2 =
3039  Mnemonic == "cfp" ? 4 :
3040  Mnemonic == "dvp" ? 5 :
3041  Mnemonic == "cpp" ? 7 :
3042  0;
3043  assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3044  createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
3045  }
3046 
3047  Parser.Lex(); // Eat operand.
3048 
3049  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3050  bool HasRegister = false;
3051 
3052  // Check for the optional register operand.
3053  if (parseOptionalToken(AsmToken::Comma)) {
3054  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3055  return TokError("expected register operand");
3056  HasRegister = true;
3057  }
3058 
3059  if (ExpectRegister && !HasRegister)
3060  return TokError("specified " + Mnemonic + " op requires a register");
3061  else if (!ExpectRegister && HasRegister)
3062  return TokError("specified " + Mnemonic + " op does not use a register");
3063 
3064  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3065  return true;
3066 
3067  return false;
3068 }
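// [Editorial note, not in the original source] Examples: "tlbi vmalle1"
// names an "all" op and must not take a register, while "tlbi vae1, x0"
// requires one; both expand to SYS aliases via createSysAlias.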
3069 
3070 OperandMatchResultTy
3071 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3072  MCAsmParser &Parser = getParser();
3073  const AsmToken &Tok = Parser.getTok();
3074 
3075  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3076  TokError("'csync' operand expected");
3077  return MatchOperand_ParseFail;
3078  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3079  // Immediate operand.
3080  const MCExpr *ImmVal;
3081  SMLoc ExprLoc = getLoc();
3082  AsmToken IntTok = Tok;
3083  if (getParser().parseExpression(ImmVal))
3084  return MatchOperand_ParseFail;
3085  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3086  if (!MCE) {
3087  Error(ExprLoc, "immediate value expected for barrier operand");
3088  return MatchOperand_ParseFail;
3089  }
3090  int64_t Value = MCE->getValue();
3091  if (Mnemonic == "dsb" && Value > 15) {
3092  // This case is a no match here, but it might be matched by the nXS
3093  // variant. We deliberately do not unlex the optional '#', as it is not
3094  // needed to characterize an integer immediate.
3095  Parser.getLexer().UnLex(IntTok);
3096  return MatchOperand_NoMatch;
3097  }
3098  if (Value < 0 || Value > 15) {
3099  Error(ExprLoc, "barrier operand out of range");
3100  return MatchOperand_ParseFail;
3101  }
3102  auto DB = AArch64DB::lookupDBByEncoding(Value);
3103  Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3104  ExprLoc, getContext(),
3105  false /*hasnXSModifier*/));
3106  return MatchOperand_Success;
3107  }
3108 
3109  if (Tok.isNot(AsmToken::Identifier)) {
3110  TokError("invalid operand for instruction");
3111  return MatchOperand_ParseFail;
3112  }
3113 
3114  StringRef Operand = Tok.getString();
3115  auto TSB = AArch64TSB::lookupTSBByName(Operand);
3116  auto DB = AArch64DB::lookupDBByName(Operand);
3117  // The only valid named option for ISB is 'sy'
3118  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3119  TokError("'sy' or #imm operand expected");
3120  return MatchOperand_ParseFail;
3121  // The only valid named option for TSB is 'csync'
3122  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3123  TokError("'csync' operand expected");
3124  return MatchOperand_ParseFail;
3125  } else if (!DB && !TSB) {
3126  if (Mnemonic == "dsb") {
3127  // This case is a no match here, but it might be matched by the nXS
3128  // variant.
3129  return MatchOperand_NoMatch;
3130  }
3131  TokError("invalid barrier option name");
3132  return MatchOperand_ParseFail;
3133  }
3134 
3135  Operands.push_back(AArch64Operand::CreateBarrier(
3136  DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3137  getContext(), false /*hasnXSModifier*/));
3138  Parser.Lex(); // Consume the option
3139 
3140  return MatchOperand_Success;
3141 }
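// [Editorial note, not in the original source] Examples: "dsb sy" and
// "dsb #15" both produce barrier operand 15; "isb" accepts only the named
// option "sy"; "dsb #16" returns NoMatch so the nXS variant below can
// claim it.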
3142 
3143 OperandMatchResultTy
3144 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3145  MCAsmParser &Parser = getParser();
3146  const AsmToken &Tok = Parser.getTok();
3147 
3148  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3149  if (Mnemonic != "dsb")
3150  return MatchOperand_ParseFail;
3151 
3152  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3153  // Immediate operand.
3154  const MCExpr *ImmVal;
3155  SMLoc ExprLoc = getLoc();
3156  if (getParser().parseExpression(ImmVal))
3157  return MatchOperand_ParseFail;
3158  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3159  if (!MCE) {
3160  Error(ExprLoc, "immediate value expected for barrier operand");
3161  return MatchOperand_ParseFail;
3162  }
3163  int64_t Value = MCE->getValue();
3164  // v8.7-A DSB in the nXS variant accepts only the following immediate
3165  // values: 16, 20, 24, 28.
3166  if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3167  Error(ExprLoc, "barrier operand out of range");
3168  return MatchOperand_ParseFail;
3169  }
3170  auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3171  Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3172  ExprLoc, getContext(),
3173  true /*hasnXSModifier*/));
3174  return MatchOperand_Success;
3175  }
3176 
3177  if (Tok.isNot(AsmToken::Identifier)) {
3178  TokError("invalid operand for instruction");
3179  return MatchOperand_ParseFail;
3180  }
3181 
3182  StringRef Operand = Tok.getString();
3183  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3184 
3185  if (!DB) {
3186  TokError("invalid barrier option name");
3187  return MatchOperand_ParseFail;
3188  }
3189 
3190  Operands.push_back(
3191  AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3192  getContext(), true /*hasnXSModifier*/));
3193  Parser.Lex(); // Consume the option
3194 
3195  return MatchOperand_Success;
3196 }
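// [Editorial note, not in the original source] Examples, assuming the
// standard v8.7-A nXS names: "dsb synxs" resolves by name and "dsb #28"
// resolves by immediate value to the same barrier.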
3197 
3198 OperandMatchResultTy
3199 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3200  MCAsmParser &Parser = getParser();
3201  const AsmToken &Tok = Parser.getTok();
3202 
3203  if (Tok.isNot(AsmToken::Identifier))
3204  return MatchOperand_NoMatch;
3205 
3206  int MRSReg, MSRReg;
3207  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3208  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3209  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3210  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3211  } else
3212  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3213 
3214  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3215  unsigned PStateImm = -1;
3216  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3217  PStateImm = PState->Encoding;
3218 
3219  Operands.push_back(
3220  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3221  PStateImm, getContext()));
3222  Parser.Lex(); // Eat identifier
3223 
3224  return MatchOperand_Success;
3225 }
3226 
3227 /// tryParseNeonVectorRegister - Parse a vector register operand.
3228 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3229  MCAsmParser &Parser = getParser();
3230  if (Parser.getTok().isNot(AsmToken::Identifier))
3231  return true;
3232 
3233  SMLoc S = getLoc();
3234  // Check for a vector register specifier first.
3235  StringRef Kind;
3236  unsigned Reg;
3237  OperandMatchResultTy Res =
3238  tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3239  if (Res != MatchOperand_Success)
3240  return true;
3241 
3242  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3243  if (!KindRes)
3244  return true;
3245 
3246  unsigned ElementWidth = KindRes->second;
3247  Operands.push_back(
3248  AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3249  S, getLoc(), getContext()));
3250 
3251  // If there was an explicit qualifier, that goes on as a literal text
3252  // operand.
3253  if (!Kind.empty())
3254  Operands.push_back(
3255  AArch64Operand::CreateToken(Kind, false, S, getContext()));
3256 
3257  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3258 }
3259 
3260 OperandMatchResultTy
3261 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3262  SMLoc SIdx = getLoc();
3263  if (parseOptionalToken(AsmToken::LBrac)) {
3264  const MCExpr *ImmVal;
3265  if (getParser().parseExpression(ImmVal))
3266  return MatchOperand_NoMatch;
3267  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3268  if (!MCE) {
3269  TokError("immediate value expected for vector index");
3270  return MatchOperand_ParseFail;
3271  }
3272 
3273  SMLoc E = getLoc();
3274 
3275  if (parseToken(AsmToken::RBrac, "']' expected"))
3276  return MatchOperand_ParseFail;
3277 
3278  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3279  E, getContext()));
3280  return MatchOperand_Success;
3281  }
3282 
3283  return MatchOperand_NoMatch;
3284 }
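// [Editorial note, not in the original source] Example: for "v0.s[1]" this
// consumes "[1]" and pushes a vector-index operand with value 1; "[x0]"
// fails with "immediate value expected for vector index".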
3285 
3286 // tryParseVectorRegister - Try to parse a vector register name with
3287 // optional kind specifier. If it is a register specifier, eat the token
3288 // and return it.
3289 OperandMatchResultTy
3290 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3291  RegKind MatchKind) {
3292  MCAsmParser &Parser = getParser();
3293  const AsmToken &Tok = Parser.getTok();
3294 
3295  if (Tok.isNot(AsmToken::Identifier))
3296  return MatchOperand_NoMatch;
3297 
3298  StringRef Name = Tok.getString();
3299  // If there is a kind specifier, it's separated from the register name by
3300  // a '.'.
3301  size_t Start = 0, Next = Name.find('.');
3302  StringRef Head = Name.slice(Start, Next);
3303  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3304 
3305  if (RegNum) {
3306  if (Next != StringRef::npos) {
3307  Kind = Name.slice(Next, StringRef::npos);
3308  if (!isValidVectorKind(Kind, MatchKind)) {
3309  TokError("invalid vector kind qualifier");
3310  return MatchOperand_ParseFail;
3311  }
3312  }
3313  Parser.Lex(); // Eat the register token.
3314 
3315  Reg = RegNum;
3316  return MatchOperand_Success;
3317  }
3318 
3319  return MatchOperand_NoMatch;
3320 }
3321 
3322 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3323 OperandMatchResultTy
3324 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3325  // Check for a SVE predicate register specifier first.
3326  const SMLoc S = getLoc();
3327  StringRef Kind;
3328  unsigned RegNum;
3329  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3330  if (Res != MatchOperand_Success)
3331  return Res;
3332 
3333  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3334  if (!KindRes)
3335  return MatchOperand_NoMatch;
3336 
3337  unsigned ElementWidth = KindRes->second;
3338  Operands.push_back(AArch64Operand::CreateVectorReg(
3339  RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3340  getLoc(), getContext()));
3341 
3342  // Not all predicates are followed by a '/m' or '/z'.
3343  MCAsmParser &Parser = getParser();
3344  if (Parser.getTok().isNot(AsmToken::Slash))
3345  return MatchOperand_Success;
3346 
3347  // But when they do they shouldn't have an element type suffix.
3348  if (!Kind.empty()) {
3349  Error(S, "not expecting size suffix");
3350  return MatchOperand_ParseFail;
3351  }
3352 
3353  // Add a literal slash as operand
3354  Operands.push_back(
3355  AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3356 
3357  Parser.Lex(); // Eat the slash.
3358 
3359  // Zeroing or merging?
3360  auto Pred = Parser.getTok().getString().lower();
3361  if (Pred != "z" && Pred != "m") {
3362  Error(getLoc(), "expecting 'm' or 'z' predication");
3363  return MatchOperand_ParseFail;
3364  }
3365 
3366  // Add zero/merge token.
3367  const char *ZM = Pred == "z" ? "z" : "m";
3368  Operands.push_back(
3369  AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3370 
3371  Parser.Lex(); // Eat zero/merge token.
3372  return MatchOperand_Success;
3373 }
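// [Editorial note, not in the original source] Examples: "p0.b" parses as a
// predicate with an element suffix and no qualifier; "p0/z" and "p0/m" add
// literal "/" and "z"/"m" tokens; "p0.b/z" is rejected with "not expecting
// size suffix".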
3374 
3375 /// parseRegister - Parse a register operand.
3376 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3377  // Try for a Neon vector register.
3378  if (!tryParseNeonVectorRegister(Operands))
3379  return false;
3380 
3381  // Otherwise try for a scalar register.
3382  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3383  return false;
3384 
3385  return true;
3386 }
3387 
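// parseSymbolicImmVal - Parse an immediate preceded by an optional relocation
// specifier, e.g. the ":lo12:sym" in "add x0, x0, :lo12:sym".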
3388 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3389  MCAsmParser &Parser = getParser();
3390  bool HasELFModifier = false;
3391  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
3392 
3393  if (parseOptionalToken(AsmToken::Colon)) {
3394  HasELFModifier = true;
3395 
3396  if (Parser.getTok().isNot(AsmToken::Identifier))
3397  return TokError("expect relocation specifier in operand after ':'");
3398 
3399  std::string LowerCase = Parser.getTok().getIdentifier().lower();
3400  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3401  .Case("lo12", AArch64MCExpr::VK_LO12)
3402  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3403  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3404  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3405  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3406  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3407  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3408  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3409  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3410  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3411  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3412  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3413  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3414  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3415  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3416  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3417  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3418  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3419  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3420  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3421  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3422  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3423  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3424  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3425  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3426  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3427  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3428  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3429  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3430  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3431  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3432  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3433  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3434  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3435  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3436  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3438  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3439  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3441  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3442  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3443  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3445  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3446  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3447  .Default(AArch64MCExpr::VK_INVALID);
3448 
3449  if (RefKind == AArch64MCExpr::VK_INVALID)
3450  return TokError("expect relocation specifier in operand after ':'");
3451 
3452  Parser.Lex(); // Eat identifier
3453 
3454  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3455  return true;
3456  }
3457 
3458  if (getParser().parseExpression(ImmVal))
3459  return true;
3460 
3461  if (HasELFModifier)
3462  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3463 
3464  return false;
3465 }
3466 
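// tryParseVectorList - Parse a {}-wrapped vector register list such as
// "{ v0.8b, v1.8b }" or the range form "{ z0.s - z3.s }"; all registers in
// the list must share one kind suffix, and the comma form requires
// sequential registers (wrapping at 31).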
3467 template <RegKind VectorKind>
3468 OperandMatchResultTy
3469 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3470  bool ExpectMatch) {
3471  MCAsmParser &Parser = getParser();
3472  if (!Parser.getTok().is(AsmToken::LCurly))
3473  return MatchOperand_NoMatch;
3474 
3475  // Wrapper around parse function
3476  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3477  bool NoMatchIsError) {
3478  auto RegTok = Parser.getTok();
3479  auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3480  if (ParseRes == MatchOperand_Success) {
3481  if (parseVectorKind(Kind, VectorKind))
3482  return ParseRes;
3483  llvm_unreachable("Expected a valid vector kind");
3484  }
3485 
3486  if (RegTok.isNot(AsmToken::Identifier) ||
3487  ParseRes == MatchOperand_ParseFail ||
3488  (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3489  Error(Loc, "vector register expected");
3490  return MatchOperand_ParseFail;
3491  }
3492 
3493  return MatchOperand_NoMatch;
3494  };
3495 
3496  SMLoc S = getLoc();
3497  auto LCurly = Parser.getTok();
3498  Parser.Lex(); // Eat the '{' token.
3499 
3500  StringRef Kind;
3501  unsigned FirstReg;
3502  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3503 
3504  // Put back the original '{' if there was no match, so that
3505  // different types of list-operands can be matched (e.g. SVE, Neon).
3506  if (ParseRes == MatchOperand_NoMatch)
3507  Parser.getLexer().UnLex(LCurly);
3508 
3509  if (ParseRes != MatchOperand_Success)
3510  return ParseRes;
3511 
3512  int64_t PrevReg = FirstReg;
3513  unsigned Count = 1;
3514 
3515  if (parseOptionalToken(AsmToken::Minus)) {
3516  SMLoc Loc = getLoc();
3517  StringRef NextKind;
3518 
3519  unsigned Reg;
3520  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3521  if (ParseRes != MatchOperand_Success)
3522  return ParseRes;
3523 
3524  // Any kind suffixes must match on all registers in the list.
3525  if (Kind != NextKind) {
3526  Error(Loc, "mismatched register size suffix");
3527  return MatchOperand_ParseFail;
3528  }
3529 
3530  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3531 
3532  if (Space == 0 || Space > 3) {
3533  Error(Loc, "invalid number of vectors");
3534  return MatchOperand_ParseFail;
3535  }
3536 
3537  Count += Space;
3538  }
3539  else {
3540  while (parseOptionalToken(AsmToken::Comma)) {
3541  SMLoc Loc = getLoc();
3542  StringRef NextKind;
3543  unsigned Reg;
3544  ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3545  if (ParseRes != MatchOperand_Success)
3546  return ParseRes;
3547 
3548  // Any kind suffixes must match on all registers in the list.
3549  if (Kind != NextKind) {
3550  Error(Loc, "mismatched register size suffix");
3551  return MatchOperand_ParseFail;
3552  }
3553 
3554  // Registers must be incremental (with wraparound at 31)
3555  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3556  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3557  Error(Loc, "registers must be sequential");
3558  return MatchOperand_ParseFail;
3559  }
3560 
3561  PrevReg = Reg;
3562  ++Count;
3563  }
3564  }
3565 
3566  if (parseToken(AsmToken::RCurly, "'}' expected"))
3567  return MatchOperand_ParseFail;
3568 
3569  if (Count > 4) {
3570  Error(S, "invalid number of vectors");
3571  return MatchOperand_ParseFail;
3572  }
3573 
3574  unsigned NumElements = 0;
3575  unsigned ElementWidth = 0;
3576  if (!Kind.empty()) {
3577  if (const auto &VK = parseVectorKind(Kind, VectorKind))
3578  std::tie(NumElements, ElementWidth) = *VK;
3579  }
3580 
3581  Operands.push_back(AArch64Operand::CreateVectorList(
3582  FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3583  getContext()));
3584 
3585  return MatchOperand_Success;
3586 }
3587 
3588 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
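/// e.g. "{ v0.4s, v1.4s }", optionally followed by a lane index like "[1]".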
3589 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3590  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3591  if (ParseRes != MatchOperand_Success)
3592  return true;
3593 
3594  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3595 }
3596 
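// tryParseGPR64sp0Operand - Parse a 64-bit register followed by an optional
// ", #0", e.g. "sp" or "sp, #0"; both forms yield the same register operand.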
3597 OperandMatchResultTy
3598 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3599  SMLoc StartLoc = getLoc();
3600 
3601  unsigned RegNum;
3602  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3603  if (Res != MatchOperand_Success)
3604  return Res;
3605 
3606  if (!parseOptionalToken(AsmToken::Comma)) {
3607  Operands.push_back(AArch64Operand::CreateReg(
3608  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3609  return MatchOperand_Success;
3610  }
3611 
3612  parseOptionalToken(AsmToken::Hash);
3613 
3614  if (getParser().getTok().isNot(AsmToken::Integer)) {
3615  Error(getLoc(), "index must be absent or #0");
3616  return MatchOperand_ParseFail;
3617  }
3618 
3619  const MCExpr *ImmVal;
3620  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3621  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3622  Error(getLoc(), "index must be absent or #0");
3623  return MatchOperand_ParseFail;
3624  }
3625 
3626  Operands.push_back(AArch64Operand::CreateReg(
3627  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3628  return MatchOperand_Success;
3629 }
3630 
3631 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3632 OperandMatchResultTy
3633 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3634  SMLoc StartLoc = getLoc();
3635 
3636  unsigned RegNum;
3637  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3638  if (Res != MatchOperand_Success)
3639  return Res;
3640 
3641  // No shift/extend is the default.
3642  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3643  Operands.push_back(AArch64Operand::CreateReg(
3644  RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3645  return MatchOperand_Success;
3646  }
3647 
3648  // Eat the comma
3649  getParser().Lex();
3650 
3651  // Match the shift
3652  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3653  Res = tryParseOptionalShiftExtend(ExtOpnd);
3654  if (Res != MatchOperand_Success)
3655  return Res;
3656 
3657  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3658  Operands.push_back(AArch64Operand::CreateReg(
3659  RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3660  Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3661  Ext->hasShiftExtendAmount()));
3662 
3663  return MatchOperand_Success;
3664 }
3665 
3666 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3667  MCAsmParser &Parser = getParser();
3668 
3669  // Some SVE instructions have a decoration after the immediate, i.e.
3670  // "mul vl". We parse them here and add tokens, which must be present in the
3671  // asm string in the tablegen instruction.
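  // For example, the "#2, mul vl" in "ld1b { z0.b }, p0/z, [x0, #2, mul vl]",
  // or the "mul #4" in "cntd x0, all, mul #4".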
3672  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3673  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3674  if (!Parser.getTok().getString().equals_lower("mul") ||
3675  !(NextIsVL || NextIsHash))
3676  return true;
3677 
3678  Operands.push_back(
3679  AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3680  Parser.Lex(); // Eat the "mul"
3681 
3682  if (NextIsVL) {
3683  Operands.push_back(
3684  AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3685  Parser.Lex(); // Eat the "vl"
3686  return false;
3687  }
3688 
3689  if (NextIsHash) {
3690  Parser.Lex(); // Eat the #
3691  SMLoc S = getLoc();
3692 
3693  // Parse immediate operand.
3694  const MCExpr *ImmVal;
3695  if (!Parser.parseExpression(ImmVal))
3696  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3697  Operands.push_back(AArch64Operand::CreateImm(
3698  MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3699  getContext()));
3700  return false; // Success for a bool-returning parser.
3701  }
3702  }
3703 
3704  return Error(getLoc(), "expected 'vl' or '#<imm>'");
3705 }
3706 
3707 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
3708  MCAsmParser &Parser = getParser();
3709  auto Tok = Parser.getTok();
3710  if (Tok.isNot(AsmToken::Identifier))
3711  return true;
3712  Operands.push_back(AArch64Operand::CreateToken(Tok.getString(), false,
3713  Tok.getLoc(), getContext()));
3714  Parser.Lex();
3715  return false;
3716 }
3717 
3718 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
3719 /// the operand regardless of the mnemonic.
3720 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3721  bool invertCondCode) {
3722  MCAsmParser &Parser = getParser();
3723 
3724  OperandMatchResultTy ResTy =
3725  MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3726 
3727  // Check if the current operand has a custom associated parser, if so, try to
3728  // custom parse the operand, or fallback to the general approach.
3729  if (ResTy == MatchOperand_Success)
3730  return false;
3731  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3732  // there was a match, but an error occurred, in which case, just return that
3733  // the operand parsing failed.
3734  if (ResTy == MatchOperand_ParseFail)
3735  return true;
3736 
3737  // Nothing custom, so do general case parsing.
3738  SMLoc S, E;
3739  switch (getLexer().getKind()) {
3740  default: {
3741  SMLoc S = getLoc();
3742  const MCExpr *Expr;
3743  if (parseSymbolicImmVal(Expr))
3744  return Error(S, "invalid operand");
3745 
3746  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3747  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3748  return false;
3749  }
3750  case AsmToken::LBrac: {
3751  SMLoc Loc = Parser.getTok().getLoc();
3752  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3753  getContext()));
3754  Parser.Lex(); // Eat '['
3755 
3756  // There's no comma after a '[', so we can parse the next operand
3757  // immediately.
3758  return parseOperand(Operands, false, false);
3759  }
3760  case AsmToken::LCurly:
3761  return parseNeonVectorList(Operands);
3762  case AsmToken::Identifier: {
3763  // If we're expecting a Condition Code operand, then just parse that.
3764  if (isCondCode)
3765  return parseCondCode(Operands, invertCondCode);
3766 
3767  // If it's a register name, parse it.
3768  if (!parseRegister(Operands))
3769  return false;
3770 
3771  // See if this is a "mul vl" decoration or "mul #<int>" operand used
3772  // by SVE instructions.
3773  if (!parseOptionalMulOperand(Operands))
3774  return false;
3775 
3776  // This could be an optional "shift" or "extend" operand.
3777  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3778  // We can only continue if no tokens were eaten.
3779  if (GotShift != MatchOperand_NoMatch)
3780  return GotShift;
3781 
3782  // If this is a two-word mnemonic, parse its special keyword
3783  // operand as an identifier.
3784  if (Mnemonic == "brb")
3785  return parseKeywordOperand(Operands);
3786 
3787  // This was not a register so parse other operands that start with an
3788  // identifier (like labels) as expressions and create them as immediates.
3789  const MCExpr *IdVal;
3790  S = getLoc();
3791  if (getParser().parseExpression(IdVal))
3792  return true;
3793  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3794  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3795  return false;
3796  }
3797  case AsmToken::Integer:
3798  case AsmToken::Real:
3799  case AsmToken::Hash: {
3800  // #42 -> immediate.
3801  S = getLoc();
3802 
3803  parseOptionalToken(AsmToken::Hash);
3804 
3805  // Parse a negative sign
3806  bool isNegative = false;
3807  if (Parser.getTok().is(AsmToken::Minus)) {
3808  isNegative = true;
3809  // We need to consume this token only when we have a Real, otherwise
3810  // we let parseSymbolicImmVal take care of it
3811  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3812  Parser.Lex();
3813  }
3814 
3815  // The only Real that should come through here is a literal #0.0 for
3816  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3817  // so convert the value.
3818  const AsmToken &Tok = Parser.getTok();
3819  if (Tok.is(AsmToken::Real)) {
3820  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3821  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3822  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3823  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3824  Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3825  return TokError("unexpected floating point literal");
3826  else if (IntVal != 0 || isNegative)
3827  return TokError("expected floating-point constant #0.0");
3828  Parser.Lex(); // Eat the token.
3829 
3830  Operands.push_back(
3831  AArch64Operand::CreateToken("#0", false, S, getContext()));
3832  Operands.push_back(
3833  AArch64Operand::CreateToken(".0", false, S, getContext()));
3834  return false;
3835  }
3836 
3837  const MCExpr *ImmVal;
3838  if (parseSymbolicImmVal(ImmVal))
3839  return true;
3840 
3841  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3842  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3843  return false;
3844  }
3845  case AsmToken::Equal: {
3846  SMLoc Loc = getLoc();
3847  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3848  return TokError("unexpected token in operand");
3849  Parser.Lex(); // Eat '='
3850  const MCExpr *SubExprVal;
3851  if (getParser().parseExpression(SubExprVal))
3852  return true;
3853 
3854  if (Operands.size() < 2 ||
3855  !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3856  return Error(Loc, "Only valid when first operand is register");
3857 
3858  bool IsXReg =
3859  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3860  Operands[1]->getReg());
3861 
3862  MCContext& Ctx = getContext();
3863  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3864  // If the operand is an immediate that fits in a mov, replace ldr with mov.
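  // For example, "ldr x0, =0x10000" can be encoded as "movz x0, #1, lsl #16".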
3865  if (isa<MCConstantExpr>(SubExprVal)) {
3866  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3867  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3868  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3869  ShiftAmt += 16;
3870  Imm >>= 16;
3871  }
3872  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3873  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3874  Operands.push_back(AArch64Operand::CreateImm(
3875  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3876  if (ShiftAmt)
3877  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3878  ShiftAmt, true, S, E, Ctx));
3879  return false;
3880  }
3881  APInt Simm = APInt(64, Imm << ShiftAmt);
3882  // check if the immediate is an unsigned or signed 32-bit int for W regs
3883  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3884  return Error(Loc, "Immediate too large for register");
3885  }
3886  // If it is a label or an imm that cannot fit in a movz, put it into CP.
3887  const MCExpr *CPLoc =
3888  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3889  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3890  return false;
3891  }
3892  }
3893 }
3894 
3895 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
3896  const MCExpr *Expr = nullptr;
3897  SMLoc L = getLoc();
3898  if (check(getParser().parseExpression(Expr), L, "expected expression"))
3899  return true;
3900  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3901  if (check(!Value, L, "expected constant expression"))
3902  return true;
3903  Out = Value->getValue();
3904  return false;
3905 }
3906 
3907 bool AArch64AsmParser::parseComma() {
3908  if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
3909  "expected comma"))
3910  return true;
3911  // Eat the comma
3912  getParser().Lex();
3913  return false;
3914 }
3915 
3916 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
3917  unsigned First, unsigned Last) {
3918  unsigned Reg;
3919  SMLoc Start, End;
3920  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
3921  return true;
3922 
3923  // Special handling for FP and LR; they aren't linearly after x28 in
3924  // the registers enum.
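  // For example, with Base == X0, parsing "lr" yields Out == 30 even though
  // LR is not the register immediately after X28 in the enum.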
3925  unsigned RangeEnd = Last;
3926  if (Base == AArch64::X0) {
3927  if (Last == AArch64::FP) {
3928  RangeEnd = AArch64::X28;
3929  if (Reg == AArch64::FP) {
3930  Out = 29;
3931  return false;
3932  }
3933  }
3934  if (Last == AArch64::LR) {
3935  RangeEnd = AArch64::X28;
3936  if (Reg == AArch64::FP) {
3937  Out = 29;
3938  return false;
3939  } else if (Reg == AArch64::LR) {
3940  Out = 30;
3941  return false;
3942  }
3943  }
3944  }
3945 
3946  if (check(Reg < First || Reg > RangeEnd, Start,
3947  Twine("expected register in range ") +
3948  AArch64InstPrinter::getRegisterName(First) + " to " +
3949  AArch64InstPrinter::getRegisterName(Last)))
3950  return true;
3951  Out = Reg - Base;
3952  return false;
3953 }
3954 
3955 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3956  const MCParsedAsmOperand &Op2) const {
3957  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3958  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3959  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3960  AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3961  return MCTargetAsmParser::regsEqual(Op1, Op2);
3962 
3963  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3964  "Testing equality of non-scalar registers not supported");
3965 
3966  // Check whether the registers match via their sub/super register classes.
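  // For example, under EqualsSuperReg a W-register operand matches the
  // X-register that contains it (w3 matches x3).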
3967  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3968  return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3969  if (AOp1.getRegEqualityTy() == EqualsSubReg)
3970  return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3971  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3972  return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3973  if (AOp2.getRegEqualityTy() == EqualsSubReg)
3974  return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3975 
3976  return false;
3977 }
3978 
3979 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3980 /// operands.
3981 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3982  StringRef Name, SMLoc NameLoc,
3984  MCAsmParser &Parser = getParser();
3985  Name = StringSwitch<StringRef>(Name.lower())
3986  .Case("beq", "b.eq")
3987  .Case("bne", "b.ne")
3988  .Case("bhs", "b.hs")
3989  .Case("bcs", "b.cs")
3990  .Case("blo", "b.lo")
3991  .Case("bcc", "b.cc")
3992  .Case("bmi", "b.mi")
3993  .Case("bpl", "b.pl")
3994  .Case("bvs", "b.vs")
3995  .Case("bvc", "b.vc")
3996  .Case("bhi", "b.hi")
3997  .Case("bls", "b.ls")
3998  .Case("bge", "b.ge")
3999  .Case("blt", "b.lt")
4000  .Case("bgt", "b.gt")
4001  .Case("ble", "b.le")
4002  .Case("bal", "b.al")
4003  .Case("bnv", "b.nv")
4004  .Default(Name);
4005 
4006  // First check for the AArch64-specific .req directive.
4007  if (Parser.getTok().is(AsmToken::Identifier) &&
4008  Parser.getTok().getIdentifier().lower() == ".req") {
4009  parseDirectiveReq(Name, NameLoc);
4010  // We always return 'error' for this, as we're done with this
4011  // statement and don't need to match the instruction.
4012  return true;
4013  }
4014 
4015  // Create the leading tokens for the mnemonic, split by '.' characters.
4016  size_t Start = 0, Next = Name.find('.');
4017  StringRef Head = Name.slice(Start, Next);
4018 
4019  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4020  // the SYS instruction.
4021  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4022  Head == "cfp" || Head == "dvp" || Head == "cpp")
4023  return parseSysAlias(Head, NameLoc, Operands);
4024 
4025  Operands.push_back(
4026  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
4027  Mnemonic = Head;
4028 
4029  // Handle condition codes for a branch mnemonic
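  // For example, "beq" was rewritten to "b.eq" above; here it is split into
  // a "b" token, a "." token and an EQ condition-code operand.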
4030  if (Head == "b" && Next != StringRef::npos) {
4031  Start = Next;
4032  Next = Name.find('.', Start + 1);
4033  Head = Name.slice(Start + 1, Next);
4034 
4035  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4036  (Head.data() - Name.data()));
4037  AArch64CC::CondCode CC = parseCondCodeString(Head);
4038  if (CC == AArch64CC::Invalid)
4039  return Error(SuffixLoc, "invalid condition code");
4040  Operands.push_back(
4041  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
4042  Operands.push_back(
4043  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4044  }
4045 
4046  // Add the remaining tokens in the mnemonic.
4047  while (Next != StringRef::npos) {
4048  Start = Next;
4049  Next = Name.find('.', Start + 1);
4050  Head = Name.slice(Start, Next);
4051  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4052  (Head.data() - Name.data()) + 1);
4053  Operands.push_back(
4054  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
4055  }
4056 
4057  // Conditional compare instructions have a Condition Code operand, which needs
4058  // to be parsed and an immediate operand created.
4059  bool condCodeFourthOperand =
4060  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4061  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4062  Head == "csinc" || Head == "csinv" || Head == "csneg");
4063 
4064  // These instructions are aliases to some of the conditional select
4065  // instructions. However, the condition code is inverted in the aliased
4066  // instruction.
4067  //
4068  // FIXME: Is this the correct way to handle these? Or should the parser
4069  // generate the aliased instructions directly?
4070  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4071  bool condCodeThirdOperand =
4072  (Head == "cinc" || Head == "cinv" || Head == "cneg");
4073 
4074  // Read the remaining operands.
4075  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4076 
4077  unsigned N = 1;
4078  do {
4079  // Parse and remember the operand.
4080  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4081  (N == 3 && condCodeThirdOperand) ||
4082  (N == 2 && condCodeSecondOperand),
4083  condCodeSecondOperand || condCodeThirdOperand)) {
4084  return true;
4085  }
4086 
4087  // After successfully parsing some operands there are two special cases to
4088  // consider (i.e. notional operands not separated by commas). Both are due
4089  // to memory specifiers:
4090  // + An RBrac will end an address for load/store/prefetch
4091  // + An '!' will indicate a pre-indexed operation.
4092  //
4093  // It's someone else's responsibility to make sure these tokens are sane
4094  // in the given context!
4095 
4096  SMLoc RLoc = Parser.getTok().getLoc();
4097  if (parseOptionalToken(AsmToken::RBrac))
4098  Operands.push_back(
4099  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
4100  SMLoc ELoc = Parser.getTok().getLoc();
4101  if (parseOptionalToken(AsmToken::Exclaim))
4102  Operands.push_back(
4103  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
4104 
4105  ++N;
4106  } while (parseOptionalToken(AsmToken::Comma));
4107  }
4108 
4109  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4110  return true;
4111 
4112  return false;
4113 }
4114 
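// Returns true if Reg names the same architectural register as ZReg through
// any of its B/H/S/D/Q/Z views, e.g. both q3 and d3 alias z3.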
4115 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4116  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4117  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4118  (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4119  (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4120  (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4121  (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4122  (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4123 }
4124 
4125 // FIXME: This entire function is a giant hack to provide us with decent
4126 // operand range validation/diagnostics until TableGen/MC can be extended
4127 // to support autogeneration of this kind of validation.
4128 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4129  SmallVectorImpl<SMLoc> &Loc) {
4130  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4131  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4132 
4133  // A prefix only applies to the instruction following it. Here we extract
4134  // prefix information for the next instruction before validating the current
4135  // one so that in the case of failure we don't erroneously continue using the
4136  // current prefix.
4137  PrefixInfo Prefix = NextPrefix;
4138  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4139 
4140  // Before validating the instruction in isolation we run through the rules
4141  // applicable when it follows a prefix instruction.
4142  // NOTE: brk & hlt can be prefixed but require no additional validation.
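  // For example, "movprfx z0, z5" may only be followed by a destructive
  // instruction that writes z0, such as "add z0.b, p0/m, z0.b, z1.b".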
4143  if (Prefix.isActive() &&
4144  (Inst.getOpcode() != AArch64::BRK) &&
4145  (Inst.getOpcode() != AArch64::HLT)) {
4146 
4147  // Prefixed instructions must have a destructive operand.
4148  if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4149  AArch64::NotDestructive)
4150  return Error(IDLoc, "instruction is unpredictable when following a"
4151  " movprfx, suggest replacing movprfx with mov");
4152 
4153  // Destination operands must match.
4154  if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4155  return Error(Loc[0], "instruction is unpredictable when following a"
4156  " movprfx writing to a different destination");
4157 
4158  // Destination operand must not be used in any other location.
4159  for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4160  if (Inst.getOperand(i).isReg() &&
4161  (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4162  isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4163  return Error(Loc[0], "instruction is unpredictable when following a"
4164  " movprfx and destination also used as non-destructive"
4165  " source");
4166  }
4167 
4168  auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4169  if (Prefix.isPredicated()) {
4170  int PgIdx = -1;
4171 
4172  // Find the instruction's general predicate.
4173  for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4174  if (Inst.getOperand(i).isReg() &&
4175  PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4176  PgIdx = i;
4177  break;
4178  }
4179 
4180  // Instruction must be predicated if the movprfx is predicated.
4181  if (PgIdx == -1 ||
4182  (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4183  return Error(IDLoc, "instruction is unpredictable when following a"
4184  " predicated movprfx, suggest using unpredicated movprfx");
4185 
4186  // Instruction must use same general predicate as the movprfx.
4187  if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4188  return Error(IDLoc, "instruction is unpredictable when following a"
4189  " predicated movprfx using a different general predicate");
4190 
4191  // Instruction element type must match the movprfx.
4192  if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4193  return Error(IDLoc, "instruction is unpredictable when following a"
4194  " predicated movprfx with a different element size");
4195  }
4196  }
4197 
4198  // Check for indexed addressing modes w/ the base register being the
4199  // same as a destination/source register or pair load where
4200  // the Rt == Rt2. All of those are undefined behaviour.
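  // For example, "ldp x0, x1, [x0], #16" is unpredictable because the
  // writeback base x0 is also a destination.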
4201  switch (Inst.getOpcode()) {
4202  case AArch64::LDPSWpre:
4203  case AArch64::LDPWpost:
4204  case AArch64::LDPWpre:
4205  case AArch64::LDPXpost:
4206  case AArch64::LDPXpre: {
4207  unsigned Rt = Inst.getOperand(1).getReg();
4208  unsigned Rt2 = Inst.getOperand(2).getReg();
4209  unsigned Rn = Inst.getOperand(3).getReg();
4210  if (RI->isSubRegisterEq(Rn, Rt))
4211  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4212  "is also a destination");
4213  if (RI->isSubRegisterEq(Rn, Rt2))
4214  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4215  "is also a destination");
4216  LLVM_FALLTHROUGH;
4217  }
4218  case AArch64::LDPDi:
4219  case AArch64::LDPQi:
4220  case AArch64::LDPSi:
4221  case AArch64::LDPSWi:
4222  case AArch64::LDPWi:
4223  case AArch64::LDPXi: {
4224  unsigned Rt = Inst.getOperand(0).getReg();
4225  unsigned Rt2 = Inst.getOperand(1).getReg();
4226  if (Rt == Rt2)
4227  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4228  break;
4229  }
4230  case AArch64::LDPDpost:
4231  case AArch64::LDPDpre:
4232  case AArch64::LDPQpost:
4233  case AArch64::LDPQpre:
4234  case AArch64::LDPSpost:
4235  case AArch64::LDPSpre:
4236  case AArch64::LDPSWpost: {
4237  unsigned Rt = Inst.getOperand(1).getReg();
4238  unsigned Rt2 = Inst.getOperand(2).getReg();
4239  if (Rt == Rt2)
4240  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4241  break;
4242  }
4243  case AArch64::STPDpost:
4244  case AArch64::STPDpre:
4245  case AArch64::STPQpost:
4246  case AArch64::STPQpre:
4247  case AArch64::STPSpost:
4248  case AArch64::STPSpre:
4249  case AArch64::STPWpost:
4250  case AArch64::STPWpre:
4251  case AArch64::STPXpost:
4252  case AArch64::STPXpre: {
4253  unsigned Rt = Inst.getOperand(1).getReg();
4254  unsigned Rt2 = Inst.getOperand(2).getReg();
4255  unsigned Rn = Inst.getOperand(3).getReg();
4256  if (RI->isSubRegisterEq(Rn, Rt))
4257  return Error(Loc[0], "unpredictable STP instruction, writeback base "
4258  "is also a source");
4259  if (RI->isSubRegisterEq(Rn, Rt2))
4260  return Error(Loc[1], "unpredictable STP instruction, writeback base "
4261  "is also a source");
4262  break;
4263  }
4264  case AArch64::LDRBBpre:
4265  case AArch64::LDRBpre:
4266  case AArch64::LDRHHpre:
4267  case AArch64::LDRHpre:
4268  case AArch64::LDRSBWpre:
4269  case AArch64::LDRSBXpre:
4270  case AArch64::LDRSHWpre:
4271  case AArch64::LDRSHXpre:
4272  case AArch64::LDRSWpre:
4273  case AArch64::LDRWpre:
4274  case AArch64::LDRXpre:
4275  case AArch64::LDRBBpost:
4276  case AArch64::LDRBpost:
4277  case AArch64::LDRHHpost:
4278  case AArch64::LDRHpost:
4279  case AArch64::LDRSBWpost:
4280  case AArch64::LDRSBXpost:
4281  case AArch64::LDRSHWpost:
4282  case AArch64::LDRSHXpost:
4283  case AArch64::LDRSWpost:
4284  case AArch64::LDRWpost:
4285  case AArch64::LDRXpost: {
4286  unsigned Rt = Inst.getOperand(1).getReg();
4287  unsigned Rn = Inst.getOperand(2).getReg();
4288  if (RI->isSubRegisterEq(Rn, Rt))
4289  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4290  "is also a source");
4291  break;
4292  }
4293  case AArch64::STRBBpost:
4294  case AArch64::STRBpost:
4295  case AArch64::STRHHpost:
4296  case AArch64::STRHpost:
4297  case AArch64::STRWpost:
4298  case AArch64::STRXpost:
4299  case AArch64::STRBBpre:
4300  case AArch64::STRBpre:
4301  case AArch64::STRHHpre:
4302  case AArch64::STRHpre:
4303  case AArch64::STRWpre:
4304  case AArch64::STRXpre: {
4305  unsigned Rt = Inst.getOperand(1).getReg();
4306  unsigned Rn = Inst.getOperand(2).getReg();
4307  if (RI->isSubRegisterEq(Rn, Rt))
4308  return Error(Loc[0], "unpredictable STR instruction, writeback base "
4309  "is also a source");
4310  break;
4311  }
4312  case AArch64::STXRB:
4313  case AArch64::STXRH:
4314  case AArch64::STXRW:
4315  case AArch64::STXRX:
4316  case AArch64::STLXRB:
4317  case AArch64::STLXRH:
4318  case AArch64::STLXRW:
4319  case AArch64::STLXRX: {
4320  unsigned Rs = Inst.getOperand(0).getReg();
4321  unsigned Rt = Inst.getOperand(1).getReg();
4322  unsigned Rn = Inst.getOperand(2).getReg();
4323  if (RI->isSubRegisterEq(Rt, Rs) ||
4324  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4325  return Error(Loc[0],
4326  "unpredictable STXR instruction, status is also a source");
4327  break;
4328  }
4329  case AArch64::STXPW:
4330  case AArch64::STXPX:
4331  case AArch64::STLXPW:
4332  case AArch64::STLXPX: {
4333  unsigned Rs = Inst.getOperand(0).getReg();
4334  unsigned Rt1 = Inst.getOperand(1).getReg();
4335  unsigned Rt2 = Inst.getOperand(2).getReg();
4336  unsigned Rn = Inst.getOperand(3).getReg();
4337  if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4338  (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4339  return Error(Loc[0],
4340  "unpredictable STXP instruction, status is also a source");
4341  break;
4342  }
4343  case AArch64::LDRABwriteback:
4344  case AArch64::LDRAAwriteback: {
4345  unsigned Xt = Inst.getOperand(0).getReg();
4346  unsigned Xn = Inst.getOperand(1).getReg();
4347  if (Xt == Xn)
4348  return Error(Loc[0],
4349  "unpredictable LDRA instruction, writeback base"
4350  " is also a destination");
4351  break;
4352  }
4353  }
4354 
4355 
4356  // Now check immediate ranges. Separate from the above as there is overlap
4357  // in the instructions being checked and this keeps the nested conditionals
4358  // to a minimum.
4359  switch (Inst.getOpcode()) {
4360  case AArch64::ADDSWri:
4361  case AArch64::ADDSXri:
4362  case AArch64::ADDWri:
4363  case AArch64::ADDXri:
4364  case AArch64::SUBSWri:
4365  case AArch64::SUBSXri:
4366  case AArch64::SUBWri:
4367  case AArch64::SUBXri: {
4368  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4369  // some slight duplication here.
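  // For example, "add x0, x1, :lo12:sym" is accepted below, while a bare
  // symbol reference such as "add x0, x1, sym" is diagnosed as invalid.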
4370  if (Inst.getOperand(2).isExpr()) {
4371  const MCExpr *Expr = Inst.getOperand(2).getExpr();
4372  AArch64MCExpr::VariantKind ELFRefKind;
4373  MCSymbolRefExpr::VariantKind DarwinRefKind;
4374  int64_t Addend;
4375  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4376 
4377  // Only allow these with ADDXri.
4378  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4379  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4380  Inst.getOpcode() == AArch64::ADDXri)
4381  return false;
4382 
4383  // Only allow these with ADDXri/ADDWri
4384  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4385  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4386  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4387  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4388  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4389  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4390  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4391  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4392  ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4393  ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4394  (Inst.getOpcode() == AArch64::ADDXri ||
4395  Inst.getOpcode() == AArch64::ADDWri))
4396  return false;
4397 
4398  // Otherwise, don't allow symbol refs in the immediate field.
4399  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4400  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4401  // 'cmp w0, borked').
4402  return Error(Loc.back(), "invalid immediate expression");
4403  }
4404  // We don't validate more complex expressions here
4405  }
4406  return false;
4407  }
4408  default:
4409  return false;
4410  }
4411 }
4412 
4413 static std::string AArch64MnemonicSpellCheck(StringRef S,
4414  const FeatureBitset &FBS,
4415  unsigned VariantID = 0);
4416 
4417 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4418  uint64_t ErrorInfo,
4419  OperandVector &Operands) {
4420  switch (ErrCode) {
4421  case Match_InvalidTiedOperand: {
4422  RegConstraintEqualityTy EqTy =
4423  static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4424  .getRegEqualityTy();
4425  switch (EqTy) {
4426  case RegConstraintEqualityTy::EqualsSubReg:
4427  return Error(Loc, "operand must be 64-bit form of destination register");
4428  case RegConstraintEqualityTy::EqualsSuperReg:
4429  return Error(Loc, "operand must be 32-bit form of destination register");
4430  case RegConstraintEqualityTy::EqualsReg:
4431  return Error(Loc, "operand must match destination register");
4432  }
4433  llvm_unreachable("Unknown RegConstraintEqualityTy");
4434  }
4435  case Match_MissingFeature:
4436  return Error(Loc,
4437  "instruction requires a CPU feature not currently enabled");
4438  case Match_InvalidOperand:
4439  return Error(Loc, "invalid operand for instruction");
4440  case Match_InvalidSuffix:
4441  return Error(Loc, "invalid type suffix for instruction");
4442  case Match_InvalidCondCode:
4443  return Error(Loc, "expected AArch64 condition code");
4444  case Match_AddSubRegExtendSmall:
4445  return Error(Loc,
4446  "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
4447  case Match_AddSubRegExtendLarge:
4448  return Error(Loc,
4449  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4450  case Match_AddSubSecondSource:
4451  return Error(Loc,
4452  "expected compatible register, symbol or integer in range [0, 4095]");
4453  case Match_LogicalSecondSource:
4454  return Error(Loc, "expected compatible register or logical immediate");
4455  case Match_InvalidMovImm32Shift:
4456  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4457  case Match_InvalidMovImm64Shift:
4458  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4459  case Match_AddSubRegShift32:
4460  return Error(Loc,
4461  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4462  case Match_AddSubRegShift64:
4463  return Error(Loc,
4464  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4465  case Match_InvalidFPImm:
4466  return Error(Loc,
4467  "expected compatible register or floating-point constant");
4468  case Match_InvalidMemoryIndexedSImm6:
4469  return Error(Loc, "index must be an integer in range [-32, 31].");
4470  case Match_InvalidMemoryIndexedSImm5:
4471  return Error(Loc, "index must be an integer in range [-16, 15].");
4472  case Match_InvalidMemoryIndexed1SImm4:
4473  return Error(Loc, "index must be an integer in range [-8, 7].");
4474  case Match_InvalidMemoryIndexed2SImm4:
4475  return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4476  case Match_InvalidMemoryIndexed3SImm4:
4477  return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4478  case Match_InvalidMemoryIndexed4SImm4:
4479  return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4480  case Match_InvalidMemoryIndexed16SImm4:
4481  return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4482  case Match_InvalidMemoryIndexed32SImm4:
4483  return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
4484  case Match_InvalidMemoryIndexed1SImm6:
4485  return Error(Loc, "index must be an integer in range [-32, 31].");
4486  case Match_InvalidMemoryIndexedSImm8:
4487  return Error(Loc, "index must be an integer in range [-128, 127].");
4488  case Match_InvalidMemoryIndexedSImm9:
4489  return Error(Loc, "index must be an integer in range [-256, 255].");
4490  case Match_InvalidMemoryIndexed16SImm9:
4491  return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4492  case Match_InvalidMemoryIndexed8SImm10:
4493  return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4494  case Match_InvalidMemoryIndexed4SImm7:
4495  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4496  case Match_InvalidMemoryIndexed8SImm7:
4497  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4498  case Match_InvalidMemoryIndexed16SImm7:
4499  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4500  case Match_InvalidMemoryIndexed8UImm5:
4501  return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4502  case Match_InvalidMemoryIndexed4UImm5:
4503  return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4504  case Match_InvalidMemoryIndexed2UImm5:
4505  return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4506  case Match_InvalidMemoryIndexed8UImm6:
4507  return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4508  case Match_InvalidMemoryIndexed16UImm6:
4509  return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4510  case Match_InvalidMemoryIndexed4UImm6:
4511  return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4512  case Match_InvalidMemoryIndexed2UImm6:
4513  return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4514  case Match_InvalidMemoryIndexed1UImm6:
4515  return Error(Loc, "index must be in range [0, 63].");
4516  case Match_InvalidMemoryWExtend8:
4517  return Error(Loc,
4518  "expected 'uxtw' or 'sxtw' with optional shift of #0");
4519  case Match_InvalidMemoryWExtend16:
4520  return Error(Loc,
4521  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4522  case Match_InvalidMemoryWExtend32:
4523  return Error(Loc,
4524  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4525  case Match_InvalidMemoryWExtend64:
4526  return Error(Loc,
4527  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4528  case Match_InvalidMemoryWExtend128:
4529  return Error(Loc,
4530  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4531  case Match_InvalidMemoryXExtend8:
4532  return Error(Loc,
4533  "expected 'lsl' or 'sxtx' with optional shift of #0");
4534  case Match_InvalidMemoryXExtend16:
4535  return Error(Loc,
4536  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4537  case Match_InvalidMemoryXExtend32:
4538  return Error(Loc,
4539  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4540  case Match_InvalidMemoryXExtend64:
4541  return Error(Loc,
4542  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4543  case Match_InvalidMemoryXExtend128:
4544  return Error(Loc,
4545  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4546  case Match_InvalidMemoryIndexed1:
4547  return Error(Loc, "index must be an integer in range [0, 4095].");
4548  case Match_InvalidMemoryIndexed2:
4549  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4550  case Match_InvalidMemoryIndexed4:
4551  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4552  case Match_InvalidMemoryIndexed8:
4553  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4554  case Match_InvalidMemoryIndexed16:
4555  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4556  case Match_InvalidImm0_1:
4557  return Error(Loc, "immediate must be an integer in range [0, 1].");
4558  case Match_InvalidImm0_7:
4559  return Error(Loc, "immediate must be an integer in range [0, 7].");
4560  case Match_InvalidImm0_15:
4561  return Error(Loc, "immediate must be an integer in range [0, 15].");
4562  case Match_InvalidImm0_31:
4563  return Error(Loc, "immediate must be an integer in range [0, 31].");
4564  case Match_InvalidImm0_63:
4565  return Error(Loc, "immediate must be an integer in range [0, 63].");
4566  case Match_InvalidImm0_127:
4567  return Error(Loc, "immediate must be an integer in range [0, 127].");
4568  case Match_InvalidImm0_255:
4569  return Error(Loc, "immediate must be an integer in range [0, 255].");
4570  case Match_InvalidImm0_65535:
4571  return Error(Loc, "immediate must be an integer in range [0, 65535].");
4572  case Match_InvalidImm1_8:
4573  return Error(Loc, "immediate must be an integer in range [1, 8].");
4574  case Match_InvalidImm1_16:
4575  return Error(Loc, "immediate must be an integer in range [1, 16].");
4576  case Match_InvalidImm1_32:
4577  return Error(Loc, "immediate must be an integer in range [1, 32].");
4578  case Match_InvalidImm1_64:
4579  return Error(Loc, "immediate must be an integer in range [1, 64].");
4580  case Match_InvalidSVEAddSubImm8:
4581  return Error(Loc, "immediate must be an integer in range [0, 255]"
4582  " with a shift amount of 0");
4583  case Match_InvalidSVEAddSubImm16:
4584  case Match_InvalidSVEAddSubImm32:
4585  case Match_InvalidSVEAddSubImm64:
4586  return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4587  "multiple of 256 in range [256, 65280]");
4588  case Match_InvalidSVECpyImm8:
4589  return Error(Loc, "immediate must be an integer in range [-128, 255]"
4590  " with a shift amount of 0");
4591  case Match_InvalidSVECpyImm16:
4592  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4593  "multiple of 256 in range [-32768, 65280]");
4594  case Match_InvalidSVECpyImm32:
4595  case Match_InvalidSVECpyImm64:
4596  return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4597  "multiple of 256 in range [-32768, 32512]");
4598  case Match_InvalidIndexRange1_1:
4599  return Error(Loc, "expected lane specifier '[1]'");
4600  case Match_InvalidIndexRange0_15:
4601  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4602  case Match_InvalidIndexRange0_7:
4603  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4604  case Match_InvalidIndexRange0_3:
4605  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4606  case Match_InvalidIndexRange0_1:
4607  return Error(Loc, "vector lane must be an integer in range [0, 1].");
4608  case Match_InvalidSVEIndexRange0_63:
4609  return Error(Loc, "vector lane must be an integer in range [0, 63].");
4610  case Match_InvalidSVEIndexRange0_31:
4611  return Error(Loc, "vector lane must be an integer in range [0, 31].");
4612  case Match_InvalidSVEIndexRange0_15:
4613  return Error(Loc, "vector lane must be an integer in range [0, 15].");
4614  case Match_InvalidSVEIndexRange0_7:
4615  return Error(Loc, "vector lane must be an integer in range [0, 7].");
4616  case Match_InvalidSVEIndexRange0_3:
4617  return Error(Loc, "vector lane must be an integer in range [0, 3].");
4618  case Match_InvalidLabel:
4619  return Error(Loc, "expected label or encodable integer pc offset");
4620  case Match_MRS:
4621  return Error(Loc, "expected readable system register");
4622  case Match_MSR:
4623  return Error(Loc, "expected writable system register or pstate");
4624  case Match_InvalidComplexRotationEven:
4625  return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4626  case Match_InvalidComplexRotationOdd:
4627  return Error(Loc, "complex rotation must be 90 or 270.");
4628  case Match_MnemonicFail: {
4629  std::string Suggestion = AArch64MnemonicSpellCheck(
4630  ((AArch64Operand &)*Operands[0]).getToken(),
4631  ComputeAvailableFeatures(STI->getFeatureBits()));
4632  return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4633  }
4634  case Match_InvalidGPR64shifted8:
4635  return Error(Loc, "register must be x0..x30 or xzr, without shift");
4636  case Match_InvalidGPR64shifted16:
4637  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4638  case Match_InvalidGPR64shifted32:
4639  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4640  case Match_InvalidGPR64shifted64:
4641  return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4642  case Match_InvalidGPR64NoXZRshifted8:
4643  return Error(Loc, "register must be x0..x30 without shift");
4644  case Match_InvalidGPR64NoXZRshifted16:
4645  return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4646  case Match_InvalidGPR64NoXZRshifted32:
4647  return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4648  case Match_InvalidGPR64NoXZRshifted64:
4649  return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4650  case Match_InvalidZPR32UXTW8:
4651  case Match_InvalidZPR32SXTW8:
4652  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4653  case Match_InvalidZPR32UXTW16:
4654  case Match_InvalidZPR32SXTW16:
4655  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4656  case Match_InvalidZPR32UXTW32:
4657  case Match_InvalidZPR32SXTW32:
4658  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4659  case Match_InvalidZPR32UXTW64:
4660  case Match_InvalidZPR32SXTW64:
4661  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4662  case Match_InvalidZPR64UXTW8:
4663  case Match_InvalidZPR64SXTW8:
4664  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4665  case Match_InvalidZPR64UXTW16:
4666  case Match_InvalidZPR64SXTW16:
4667  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4668  case Match_InvalidZPR64UXTW32:
4669  case Match_InvalidZPR64SXTW32:
4670  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4671  case Match_InvalidZPR64UXTW64:
4672  case Match_InvalidZPR64SXTW64:
4673  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4674  case Match_InvalidZPR32LSL8:
4675  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4676  case Match_InvalidZPR32LSL16:
4677  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4678  case Match_InvalidZPR32LSL32:
4679  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4680  case Match_InvalidZPR32LSL64:
4681  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4682  case Match_InvalidZPR64LSL8:
4683  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4684  case Match_InvalidZPR64LSL16:
4685  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4686  case Match_InvalidZPR64LSL32:
4687  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4688  case Match_InvalidZPR64LSL64:
4689  return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4690  case Match_InvalidZPR0:
4691  return Error(Loc, "expected register without element width suffix");
4692  case Match_InvalidZPR8:
4693  case Match_InvalidZPR16:
4694  case Match_InvalidZPR32:
4695  case Match_InvalidZPR64:
4696  case Match_InvalidZPR128:
4697  return Error(Loc, "invalid element width");
4698  case Match_InvalidZPR_3b8:
4699  return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4700  case Match_InvalidZPR_3b16:
4701  return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4702  case Match_InvalidZPR_3b32:
4703  return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4704  case Match_InvalidZPR_4b16:
4705  return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4706  case Match_InvalidZPR_4b32:
4707  return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4708  case Match_InvalidZPR_4b64:
4709  return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4710  case Match_InvalidSVEPattern:
4711  return Error(Loc, "invalid predicate pattern");
4712  case Match_InvalidSVEPredicateAnyReg:
4713  case Match_InvalidSVEPredicateBReg:
4714  case Match_InvalidSVEPredicateHReg:
4715  case Match_InvalidSVEPredicateSReg:
4716  case Match_InvalidSVEPredicateDReg:
4717  return Error(Loc, "invalid predicate register.");
4718  case Match_InvalidSVEPredicate3bAnyReg:
4719  return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
4720  case Match_InvalidSVEPredicate3bBReg:
4721  return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
4722  case Match_InvalidSVEPredicate3bHReg:
4723  return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
4724  case Match_InvalidSVEPredicate3bSReg:
4725  return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
4726  case Match_InvalidSVEPredicate3bDReg:
4727  return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
4728  case Match_InvalidSVEExactFPImmOperandHalfOne:
4729  return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4730  case Match_InvalidSVEExactFPImmOperandHalfTwo:
4731  return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4732  case Match_InvalidSVEExactFPImmOperandZeroOne:
4733  return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4734  default:
4735  llvm_unreachable("unexpected error code!");
4736  }
4737 }
4738 
4739 static const char *getSubtargetFeatureName(uint64_t Val);
4740 
4741 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4742  OperandVector &Operands,
4743  MCStreamer &Out,
4744  uint64_t &ErrorInfo,
4745  bool MatchingInlineAsm) {
4746  assert(!Operands.empty() && "Unexpected empty operand list!");
4747  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4748  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4749 
4750  StringRef Tok = Op.getToken();
4751  unsigned NumOperands = Operands.size();
4752 
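  // Rewrite the "lsl" immediate-shift alias into UBFM, e.g.
  // "lsl x0, x1, #3" becomes "ubfm x0, x1, #61, #60".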
4753  if (NumOperands == 4 && Tok == "lsl") {
4754  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4755  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4756  if (Op2.isScalarReg() && Op3.isImm()) {
4757  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4758  if (Op3CE) {
4759  uint64_t Op3Val = Op3CE->getValue();
4760  uint64_t NewOp3Val = 0;
4761  uint64_t NewOp4Val = 0;
4762  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4763  Op2.getReg())) {
4764  NewOp3Val = (32 - Op3Val) & 0x1f;
4765  NewOp4Val = 31 - Op3Val;
4766  } else {
4767  NewOp3Val = (64 - Op3Val) & 0x3f;
4768  NewOp4Val = 63 - Op3Val;
4769  }
4770 
4771  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4772  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4773 
4774  Operands[0] = AArch64Operand::CreateToken(
4775  "ubfm", false, Op.getStartLoc(), getContext());
4776  Operands.push_back(AArch64Operand::CreateImm(
4777  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4778  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4779  Op3.getEndLoc(), getContext());
4780  }
4781  }
4782  } else if (NumOperands == 4 && Tok == "bfc") {
4783  // FIXME: Horrible hack to handle BFC->BFM alias.
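  // "bfc Rd, #lsb, #width" becomes "bfm Rd, zr, #(-lsb mod regwidth),
  // #(width - 1)". For example, with the arithmetic below, "bfc w0, #3, #4"
  // is rewritten to "bfm w0, wzr, #29, #3".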
4784  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4785  AArch64Operand &LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4786  AArch64Operand &WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4787 
4788  if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4789  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4790  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4791 
4792  if (LSBCE && WidthCE) {
4793  uint64_t LSB = LSBCE->getValue();
4794  uint64_t Width = WidthCE->getValue();
4795 
4796  uint64_t RegWidth = 0;
4797  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4798  Op1.getReg()))
4799  RegWidth = 64;
4800  else
4801  RegWidth = 32;
4802 
4803  if (LSB >= RegWidth)
4804  return Error(LSBOp.getStartLoc(),
4805  "expected integer in range [0, 31]");
4806  if (Width < 1 || Width > RegWidth)
4807  return Error(WidthOp.getStartLoc(),
4808  "expected integer in range [1, 32]");
4809 
4810  uint64_t ImmR = 0;
4811  if (RegWidth == 32)
4812  ImmR = (32 - LSB) & 0x1f;
4813  else
4814  ImmR = (64 - LSB) & 0x3f;
4815 
4816  uint64_t ImmS = Width - 1;
4817 
4818  if (ImmR != 0 && ImmS >= ImmR)
4819  return Error(WidthOp.getStartLoc(),
4820  "requested insert overflows register");
4821 
4822  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4823  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4824  Operands[0] = AArch64Operand::CreateToken(
4825  "bfm", false, Op.getStartLoc(), getContext());
4826  Operands[2] = AArch64Operand::CreateReg(
4827  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4828  SMLoc(), SMLoc(), getContext());
4829  Operands[3] = AArch64Operand::CreateImm(
4830  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4831  Operands.emplace_back(
4832  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4833  WidthOp.getEndLoc(), getContext()));
4834  }
4835  }
4836  } else if (NumOperands == 5) {
4837  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4838  // UBFIZ -> UBFM aliases.
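  // With the arithmetic below, the lsb immediate becomes (-lsb mod regwidth)
  // and the width immediate becomes (width - 1): e.g. "bfi w0, w1, #3, #4"
  // is rewritten to "bfm w0, w1, #29, #3".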
4839  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4840  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4841  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4842  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4843 
4844  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4845  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4846  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4847 
4848  if (Op3CE && Op4CE) {
4849  uint64_t Op3Val = Op3CE->getValue();
4850  uint64_t Op4Val = Op4CE->getValue();
4851 
4852  uint64_t RegWidth = 0;
4853  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4854  Op1.getReg()))
4855  RegWidth = 64;
4856  else
4857  RegWidth = 32;
4858 
4859  if (Op3Val >= RegWidth)
4860  return Error(Op3.getStartLoc(),
4861  "expected integer in range [0, 31]");
4862  if (Op4Val < 1 || Op4Val > RegWidth)
4863  return Error(Op4.getStartLoc(),
4864  "expected integer in range [1, 32]");
4865 
4866  uint64_t NewOp3Val = 0;
4867  if (RegWidth == 32)
4868  NewOp3Val = (32 - Op3Val) & 0x1f;
4869  else
4870  NewOp3Val = (64 - Op3Val) & 0x3f;
4871 
4872  uint64_t NewOp4Val = Op4Val - 1;
4873 
4874  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4875  return Error(Op4.getStartLoc(),
4876  "requested insert overflows register");
4877 
4878  const MCExpr *NewOp3 =
4879  MCConstantExpr::create(NewOp3Val, getContext());
4880  const MCExpr *NewOp4 =
4881  MCConstantExpr::create(NewOp4Val, getContext());
4882  Operands[3] = AArch64Operand::CreateImm(
4883  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4884  Operands[4] = AArch64Operand::CreateImm(
4885  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4886  if (Tok == "bfi")
4887  Operands[0] = AArch64Operand::CreateToken(
4888  "bfm", false, Op.getStartLoc(), getContext());
4889  else if (Tok == "sbfiz")
4890  Operands[0] = AArch64Operand::CreateToken(
4891  "sbfm", false, Op.getStartLoc(), getContext());
4892  else if (Tok == "ubfiz")
4893  Operands[0] = AArch64Operand::CreateToken(
4894  "ubfm", false, Op.getStartLoc(), getContext());
4895  else
4896  llvm_unreachable("No valid mnemonic for alias?");
4897  }
4898  }
4899 
4900  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4901  // UBFX -> UBFM aliases.
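  // Here only the second immediate changes, to the index of the last
  // extracted bit (lsb + width - 1): e.g. "sbfx x0, x1, #8, #16" is
  // rewritten below to "sbfm x0, x1, #8, #23".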
4902  } else if (NumOperands == 5 &&
4903  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4904  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4905  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4906  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4907 
4908  if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4909  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4910  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4911 
4912  if (Op3CE && Op4CE) {
4913  uint64_t Op3Val = Op3CE->getValue();
4914  uint64_t Op4Val = Op4CE->getValue();
4915 
4916  uint64_t RegWidth = 0;
4917  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4918  Op1.getReg()))
4919  RegWidth = 64;
4920  else
4921  RegWidth = 32;
4922 
4923  if (Op3Val >= RegWidth)
4924  return Error(Op3.getStartLoc(),
4925  "expected integer in range [0, 31]");
4926  if (Op4Val < 1 || Op4Val > RegWidth)
4927  return Error(Op4.getStartLoc(),
4928  "expected integer in range [1, 32]");
4929 
4930  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4931 
4932  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4933  return Error(Op4.getStartLoc(),
4934  "requested extract overflows register");
4935 
4936  const MCExpr *NewOp4 =
4937  MCConstantExpr::create(NewOp4Val, getContext());
4938  Operands[4] = AArch64Operand::CreateImm(
4939  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4940  if (Tok == "bfxil")
4941  Operands[0] = AArch64Operand::CreateToken(
4942  "bfm", false, Op.getStartLoc(), getContext());
4943  else if (Tok == "sbfx")
4944  Operands[0] = AArch64Operand::CreateToken(
4945  "sbfm", false, Op.getStartLoc(), getContext());
4946  else if (Tok == "ubfx")
4947  Operands[0] = AArch64Operand::CreateToken(
4948  "ubfm", false, Op.getStartLoc(), getContext());
4949  else
4950  llvm_unreachable("No valid mnemonic for alias?");
4951  }
4952  }
4953  }
4954  }
4955 
4956  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4957  // instruction for FP registers correctly in some rare circumstances. Convert
4958  // it to a safe instruction and warn (because silently changing someone's
4959  // assembly is rude).
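  // e.g. "movi.2d v0, #0" is emitted as the equivalent "movi.16b v0, #0".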
4960  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4961  NumOperands == 4 && Tok == "movi") {
4962  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4963  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4964  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4965  if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4966  (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4967  StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4968  if (Suffix.lower() == ".2d" &&
4969  cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4970  Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4971  " correctly on this CPU, converting to equivalent movi.16b");
4972  // Switch the suffix to .16b.
4973  unsigned Idx = Op1.isToken() ? 1 : 2;
4974  Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4975  getContext());
4976  }
4977  }
4978  }
4979 
4980  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4981  // InstAlias can't quite handle this since the reg classes aren't
4982  // subclasses.
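  // e.g. for "sxtw x0, w1", the W source below is rewritten to its X-register
  // alias ("sxtw x0, x1"); only the low 32 bits of the source are read anyway.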
4983  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4984  // The source register can be Wn here, but the matcher expects a
4985  // GPR64. Twiddle it here if necessary.
4986  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4987  if (Op.isScalarReg()) {
4988  unsigned Reg = getXRegFromWReg(Op.getReg());
4989  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4990  Op.getStartLoc(), Op.getEndLoc(),
4991  getContext());
4992  }
4993  }
4994  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4995  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4996  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4997  if (Op.isScalarReg() &&
4998  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4999  Op.getReg())) {
5000  // The source register can be Wn here, but the matcher expects a
5001  // GPR64. Twiddle it here if necessary.
5002  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5003  if (Op.isScalarReg()) {
5004  unsigned Reg = getXRegFromWReg(Op.getReg());
5005  Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5006  Op.getStartLoc(),
5007  Op.getEndLoc(), getContext());
5008  }
5009  }
5010  }
5011  // FIXME: Likewise for uxt[bh] with a Xd dst operand
5012  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5013  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5014  if (Op.isScalarReg() &&
5015  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5016  Op.getReg())) {
5017  // The source register can be Wn here, but the matcher expects a
5018  // GPR32. Twiddle it here if necessary.
5019  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5020  if (Op.isScalarReg()) {
5021  unsigned Reg = getWRegFromXReg(Op.getReg());
5022  Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5023  Op.getStartLoc(),
5024  Op.getEndLoc(), getContext());
5025  }
5026  }
5027  }
5028 
5029  MCInst Inst;
5030  FeatureBitset MissingFeatures;
5031  // First try to match against the secondary set of tables containing the
5032  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5033  unsigned MatchResult =
5034  MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5035  MatchingInlineAsm, 1);
5036 
5037  // If that fails, try against the alternate table containing long-form NEON:
5038  // "fadd v0.2s, v1.2s, v2.2s"
5039  if (MatchResult != Match_Success) {
5040  // But first, save the short-form match result: we can use it in case the
5041  // long-form match also fails.
5042  auto ShortFormNEONErrorInfo = ErrorInfo;
5043  auto ShortFormNEONMatchResult = MatchResult;
5044  auto ShortFormNEONMissingFeatures = MissingFeatures;
5045 
5046  MatchResult =
5047  MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5048  MatchingInlineAsm, 0);
5049 
5050  // Now, both matches failed, and the long-form match failed on the mnemonic
5051  // suffix token operand. The short-form match failure is probably more
5052  // relevant: use it instead.
5053  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5054  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5055  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5056  MatchResult = ShortFormNEONMatchResult;
5057  ErrorInfo = ShortFormNEONErrorInfo;
5058  MissingFeatures = ShortFormNEONMissingFeatures;
5059  }
5060  }
5061 
5062  switch (MatchResult) {
5063  case Match_Success: {
5064  // Perform range checking and other semantic validations
5065  SmallVector<SMLoc, 8> OperandLocs;
5066  NumOperands = Operands.size();
5067  for (unsigned i = 1; i < NumOperands; ++i)
5068  OperandLocs.push_back(Operands[i]->getStartLoc());
5069  if (validateInstruction(Inst, IDLoc, OperandLocs))
5070  return true;
5071 
5072  Inst.setLoc(IDLoc);
5073  Out.emitInstruction(Inst, getSTI());
5074  return false;
5075  }
5076  case Match_MissingFeature: {
5077  assert(MissingFeatures.any() && "Unknown missing feature!");
5078  // Special case the error message for the very common case where only
5079  // a single subtarget feature is missing (neon, e.g.).
5080  std::string Msg = "instruction requires:";
5081  for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5082  if (MissingFeatures[i]) {
5083  Msg += " ";
5084  Msg += getSubtargetFeatureName(i);
5085  }
5086  }
5087  return Error(IDLoc, Msg);
5088  }
5089  case Match_MnemonicFail:
5090  return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5091  case Match_InvalidOperand: {
5092  SMLoc ErrorLoc = IDLoc;
5093 
5094  if (