//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

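  // Illustrative usage (assembly input, not part of this file): after the
  // directive "foo .req x4", RegisterReqs lets "mov foo, #1" parse exactly
  // like "mov x4, #1".
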
  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
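
  // Illustrative context (assembly input, not part of this file): NextPrefix
  // records a prefix such as
  //   movprfx z0.s, p0/m, z1.s
  // so that the following instruction, e.g. "add z0.s, p0/m, z0.s, z2.s",
  // can be checked for a legal movprfx/instruction pairing.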

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  ParseStatus
  tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }
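
  // Illustrative usage (assembly input, not part of this file): thanks to the
  // aliases registered above,
  //   .word  0x12345678    // emits the same bytes as ".4byte 0x12345678"
  //   .xword some_label    // emits the same bytes as ".8byte some_label"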

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
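
  // Illustrative example (assembly input, not part of this file): in
  //   ldr x0, [x1, w2, sxtw #3]
  // the "w2, sxtw #3" piece is parsed as a single register operand whose
  // ShiftExtend is {SXTW, 3, /*HasExplicitAmount=*/true}, letting the
  // matcher diagnose a shift amount that does not fit the 8-byte transfer.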

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around, as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
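
  // Illustrative example (not part of this file): isSImmScaled<4, 16>()
  // accepts multiples of 16 in [-128, 112], i.e. a signed 4-bit value
  // scaled by 16.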

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
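
  // Illustrative example (not part of this file): with Scale == 8,
  // "ldr x0, [x1, #32760]" is accepted (32760 == 4095 * 8), while #32761
  // is rejected because it is not a multiple of the transfer size.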

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
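
  // Illustrative example (not part of this file): 0x00ff00ff00ff00ff is a
  // valid 64-bit logical immediate (a rotated, repeating run of ones), and
  // so is its bitwise NOT, which is why isLogicalImmNot can reuse this check.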

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
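
  // Illustrative example (not part of this file): getShiftedVal<12>() maps
  // "#0x5000" to (0x5, 12) and "#0x5" to (0x5, 0); both spell a valid
  // ADD/SUB immediate.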

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
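
  // Illustrative example (not part of this file): "add x0, x1, #0x555000"
  // matches because 0x555000 == (0x555 << 12) and 0x555 <= 0xfff.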

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
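
  // Illustrative example (not part of this file): for .h elements,
  // "add z0.h, z0.h, #512" is accepted as "#2, lsl #8" (512 == 2 << 8),
  // while "#257" is rejected: it is neither an imm8 nor an imm8 << 8.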

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
  }
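
  // Illustrative example (not part of this file): isBranchTarget<26>()
  // accepts word-aligned offsets in [-(1 << 27), (1 << 27) - 4], the
  // reachable range of B/BL.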

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicateTy::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
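
  // Illustrative example (not part of this file): for a 32-bit scaled
  // gather form expecting "uxtw #2", an operand written as
  // "[x0, z1.s, uxtw #2]" matches, while an explicit "uxtw #1" is reported
  // as an invalid shift amount rather than a generic mismatch.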

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
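
  // Illustrative example (not part of this file): isComplexRotation<90, 0>()
  // accepts #0, #90, #180, and #270 (FCMLA rotations), while
  // isComplexRotation<180, 90>() accepts #90 and #270 (FCADD rotations).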

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
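
  // Illustrative example (not part of this file): the SME2 strided list
  // "{ z0.s, z8.s }" (two registers, stride 8) is accepted because it starts
  // at Z0; two-register strided lists must start in Z0-Z7 or Z16-Z23.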

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }
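
  // Illustrative example (assembly input, not part of this file): in
  //   add x0, x1, w2, sxtw #2
  // the "w2, sxtw #2" operand satisfies isExtend64(), since SXTW extends a
  // 32-bit source register.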

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) &&
           getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
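
  // Illustrative example (assembly input, not part of this file):
  // "ldr x0, [x1, #-8]" has no legal scaled encoding (the offset is
  // negative), so it matches this fallback and is emitted as LDUR, while
  // "ldr x0, [x1, #8]" keeps the scaled LDR form.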

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
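
  // Illustrative note (not part of this file): these bounds are the
  // architectural reach of the instructions; ADR spans +/-1 MiB from the PC,
  // while ADRP spans +/-4 GiB in 4 KiB pages.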

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates, as the signing
    // instruction should be at a lower address than its use.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0b11)
      return false;
    return (Val <= 0) && (Val > -(1 << 18));
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    // Normalise to PPR.
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

1813 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1814 assert(N == 1 && "Invalid number of operands!");
1815 assert(
1816 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1818 }
1819
1820 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1821 assert(N == 1 && "Invalid number of operands!");
1823 }
1824
1825 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1826 assert(N == 1 && "Invalid number of operands!");
1827 Inst.addOperand(MCOperand::createReg(getReg()));
1828 }
1829
1830 enum VecListIndexType {
1831 VecListIdx_DReg = 0,
1832 VecListIdx_QReg = 1,
1833 VecListIdx_ZReg = 2,
1834 VecListIdx_PReg = 3,
1835 };
1836
1837 template <VecListIndexType RegTy, unsigned NumRegs>
1838 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!");
1840 static const unsigned FirstRegs[][5] = {
1841 /* DReg */ { AArch64::Q0,
1842 AArch64::D0, AArch64::D0_D1,
1843 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1844 /* QReg */ { AArch64::Q0,
1845 AArch64::Q0, AArch64::Q0_Q1,
1846 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1847 /* ZReg */ { AArch64::Z0,
1848 AArch64::Z0, AArch64::Z0_Z1,
1849 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1850 /* PReg */ { AArch64::P0,
1851 AArch64::P0, AArch64::P0_P1 }
1852 };
1853
1854 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1855 " NumRegs must be <= 4 for ZRegs");
1856
1857 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1858 " NumRegs must be <= 2 for PRegs");
1859
1860 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1861 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1862 FirstRegs[(unsigned)RegTy][0]));
1863 }
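  // Example: with RegTy == VecListIdx_DReg and NumRegs == 2, a list whose
  // start was recorded as Q2 yields D0_D1 + (Q2 - Q0) = D2_D3, the tuple
  // for "{ v2.8b, v3.8b }".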
1864
1865 template <unsigned NumRegs>
1866 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1867 assert(N == 1 && "Invalid number of operands!");
1868 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1869
1870 switch (NumRegs) {
1871 case 2:
1872 if (getVectorListStart() < AArch64::Z16) {
1873 assert((getVectorListStart() < AArch64::Z8) &&
1874 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1875 Inst.addOperand(MCOperand::createReg(
1876 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1877 } else {
1878 assert((getVectorListStart() < AArch64::Z24) &&
1879 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1880 Inst.addOperand(MCOperand::createReg(
1881 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1882 }
1883 break;
1884 case 4:
1885 if (getVectorListStart() < AArch64::Z16) {
1886 assert((getVectorListStart() < AArch64::Z4) &&
1887 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1888 Inst.addOperand(MCOperand::createReg(
1889 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1890 } else {
1891 assert((getVectorListStart() < AArch64::Z20) &&
1892 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1893 Inst.addOperand(MCOperand::createReg(
1894 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1895 }
1896 break;
1897 default:
1898 llvm_unreachable("Unsupported number of registers for strided vec list");
1899 }
1900 }
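  // Example: for NumRegs == 2 a list starting at z3 becomes the strided
  // tuple Z3_Z11 (Z0_Z8 + 3); for NumRegs == 4 a list starting at z17
  // becomes Z17_Z21_Z25_Z29 (Z16_Z20_Z24_Z28 + 1).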
1901
1902 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1903 assert(N == 1 && "Invalid number of operands!");
1904 unsigned RegMask = getMatrixTileListRegMask();
1905 assert(RegMask <= 0xFF && "Invalid mask!");
1906 Inst.addOperand(MCOperand::createImm(RegMask));
1907 }
1908
1909 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1910 assert(N == 1 && "Invalid number of operands!");
1911 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1912 }
1913
1914 template <unsigned ImmIs0, unsigned ImmIs1>
1915 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1916 assert(N == 1 && "Invalid number of operands!");
1917 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1918 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1919 }
1920
1921 void addImmOperands(MCInst &Inst, unsigned N) const {
1922 assert(N == 1 && "Invalid number of operands!");
1923 // If this is a pageoff symrefexpr with an addend, adjust the addend
1924 // to be only the page-offset portion. Otherwise, just add the expr
1925 // as-is.
1926 addExpr(Inst, getImm());
1927 }
1928
1929 template <int Shift>
1930 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1931 assert(N == 2 && "Invalid number of operands!");
1932 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1933 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1934 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1935 } else if (isShiftedImm()) {
1936 addExpr(Inst, getShiftedImmVal());
1937 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1938 } else {
1939 addExpr(Inst, getImm());
1940 Inst.addOperand(MCOperand::createImm(0));
1941 }
1942 }
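  // Example: with Shift == 12, "#4096" is emitted as the pair (1, 12),
  // "#3, lsl #12" stays (3, 12), and a plain "#5" falls through to the
  // unshifted case with an explicit zero shift.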
1943
1944 template <int Shift>
1945 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1946 assert(N == 2 && "Invalid number of operands!");
1947 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1948 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1949 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1950 } else
1951 llvm_unreachable("Not a shifted negative immediate");
1952 }
1953
1954 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1955 assert(N == 1 && "Invalid number of operands!");
1956 Inst.addOperand(MCOperand::createImm(getCondCode()));
1957 }
1958
1959 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1960 assert(N == 1 && "Invalid number of operands!");
1961 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1962 if (!MCE)
1963 addExpr(Inst, getImm());
1964 else
1965 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1966 }
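  // Example: a constant ADRP operand of 0x3000 becomes immediate 0x3 (its
  // 4 KiB page index); non-constant expressions stay symbolic so the page
  // can be fixed up at relocation time.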
1967
1968 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1969 addImmOperands(Inst, N);
1970 }
1971
1972 template<int Scale>
1973 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1974 assert(N == 1 && "Invalid number of operands!");
1975 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1976
1977 if (!MCE) {
1978 Inst.addOperand(MCOperand::createExpr(getImm()));
1979 return;
1980 }
1981 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1982 }
1983
1984 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1985 assert(N == 1 && "Invalid number of operands!");
1986 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1987 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1988 }
1989
1990 template <int Scale>
1991 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 1 && "Invalid number of operands!");
1993 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1994 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1995 }
1996
1997 template <int Scale>
1998 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1999 assert(N == 1 && "Invalid number of operands!");
2000 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2001 }
2002
2003 template <typename T>
2004 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!");
2006 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2007 std::make_unsigned_t<T> Val = MCE->getValue();
2008 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2009 Inst.addOperand(MCOperand::createImm(encoding));
2010 }
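  // Example: for T == uint64_t, "#0x00ff00ff00ff00ff" (a repeating 16-bit
  // pattern) is a valid logical immediate and is converted by
  // encodeLogicalImmediate into the N:immr:imms form used by AND/ORR/EOR.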
2011
2012 template <typename T>
2013 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2014 assert(N == 1 && "Invalid number of operands!");
2015 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2016 std::make_unsigned_t<T> Val = ~MCE->getValue();
2017 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2018 Inst.addOperand(MCOperand::createImm(encoding));
2019 }
2020
2021 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2022 assert(N == 1 && "Invalid number of operands!");
2023 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2024 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2025 Inst.addOperand(MCOperand::createImm(encoding));
2026 }
2027
2028 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2029 // Branch operands don't encode the low bits, so shift them off
2030 // here. If it's a label, however, just put it on directly as there's
2031 // not enough information now to do anything.
2032 assert(N == 1 && "Invalid number of operands!");
2033 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2034 if (!MCE) {
2035 addExpr(Inst, getImm());
2036 return;
2037 }
2038 assert(MCE && "Invalid constant immediate operand!");
2039 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2040 }
2041
2042 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2043 // PC-relative operands don't encode the low bits, so shift them off
2044 // here. If it's a label, however, just put it on directly as there's
2045 // not enough information now to do anything.
2046 assert(N == 1 && "Invalid number of operands!");
2047 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2048 if (!MCE) {
2049 addExpr(Inst, getImm());
2050 return;
2051 }
2052 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2053 }
2054
2055 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2056 // Branch operands don't encode the low bits, so shift them off
2057 // here. If it's a label, however, just put it on directly as there's
2058 // not enough information now to do anything.
2059 assert(N == 1 && "Invalid number of operands!");
2060 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2061 if (!MCE) {
2062 addExpr(Inst, getImm());
2063 return;
2064 }
2065 assert(MCE && "Invalid constant immediate operand!");
2066 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2067 }
2068
2069 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2070 // Branch operands don't encode the low bits, so shift them off
2071 // here. If it's a label, however, just put it on directly as there's
2072 // not enough information now to do anything.
2073 assert(N == 1 && "Invalid number of operands!");
2074 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2075 if (!MCE) {
2076 addExpr(Inst, getImm());
2077 return;
2078 }
2079 assert(MCE && "Invalid constant immediate operand!");
2080 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2081 }
2082
2083 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2084 assert(N == 1 && "Invalid number of operands!");
2085 Inst.addOperand(MCOperand::createImm(
2086 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2087 }
2088
2089 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 Inst.addOperand(MCOperand::createImm(getBarrier()));
2092 }
2093
2094 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2095 assert(N == 1 && "Invalid number of operands!");
2096 Inst.addOperand(MCOperand::createImm(getBarrier()));
2097 }
2098
2099 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2100 assert(N == 1 && "Invalid number of operands!");
2101
2102 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2103 }
2104
2105 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2106 assert(N == 1 && "Invalid number of operands!");
2107
2108 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2109 }
2110
2111 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2112 assert(N == 1 && "Invalid number of operands!");
2113
2114 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2115 }
2116
2117 void addSVCROperands(MCInst &Inst, unsigned N) const {
2118 assert(N == 1 && "Invalid number of operands!");
2119
2120 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2121 }
2122
2123 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2124 assert(N == 1 && "Invalid number of operands!");
2125
2126 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2127 }
2128
2129 void addSysCROperands(MCInst &Inst, unsigned N) const {
2130 assert(N == 1 && "Invalid number of operands!");
2131 Inst.addOperand(MCOperand::createImm(getSysCR()));
2132 }
2133
2134 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2135 assert(N == 1 && "Invalid number of operands!");
2136 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2137 }
2138
2139 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2142 }
2143
2144 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2147 }
2148
2149 void addShifterOperands(MCInst &Inst, unsigned N) const {
2150 assert(N == 1 && "Invalid number of operands!");
2151 unsigned Imm =
2152 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2153 Inst.addOperand(MCOperand::createImm(Imm));
2154 }
2155
2156 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2157 assert(N == 1 && "Invalid number of operands!");
2158 unsigned Imm = getShiftExtendAmount();
2159 Inst.addOperand(MCOperand::createImm(Imm));
2160 }
2161
2162 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2163 assert(N == 1 && "Invalid number of operands!");
2164
2165 if (!isScalarReg())
2166 return;
2167
2168 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2169 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2170 .getRegister(RI->getEncodingValue(getReg()));
2171 if (Reg != AArch64::XZR)
2172 llvm_unreachable("wrong register");
2173
2174 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2175 }
2176
2177 void addExtendOperands(MCInst &Inst, unsigned N) const {
2178 assert(N == 1 && "Invalid number of operands!");
2179 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2180 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2181 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2182 Inst.addOperand(MCOperand::createImm(Imm));
2183 }
2184
2185 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2188 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2189 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2190 Inst.addOperand(MCOperand::createImm(Imm));
2191 }
2192
2193 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2194 assert(N == 2 && "Invalid number of operands!");
2195 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2196 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2197 Inst.addOperand(MCOperand::createImm(IsSigned));
2198 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2199 }
2200
2201 // For 8-bit load/store instructions with a register offset, both the
2202 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2203 // they're disambiguated by whether the shift was explicit or implicit rather
2204 // than its size.
2205 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2206 assert(N == 2 && "Invalid number of operands!");
2207 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2208 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2209 Inst.addOperand(MCOperand::createImm(IsSigned));
2210 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2211 }
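  // Example: "ldrb w0, [x1, x2, lsl #0]" has an explicit amount and selects
  // the "DoShift" form, while "ldrb w0, [x1, x2]" selects "NoShift"; the
  // shift is 0 either way.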
2212
2213 template<int Shift>
2214 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216
2217 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2218 if (CE) {
2219 uint64_t Value = CE->getValue();
2220 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2221 } else {
2222 addExpr(Inst, getImm());
2223 }
2224 }
2225
2226 template<int Shift>
2227 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2228 assert(N == 1 && "Invalid number of operands!");
2229
2230 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2231 uint64_t Value = CE->getValue();
2232 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2233 }
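  // Example: with Shift == 0, the alias "mov x0, #-2" (value
  // 0xfffffffffffffffe) stores (~Value >> 0) & 0xffff = 0x1, the 16-bit
  // field from which MOVN reconstructs the original value.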
2234
2235 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2238 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2239 }
2240
2241 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2242 assert(N == 1 && "Invalid number of operands!");
2243 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2244 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2245 }
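  // Example: the even form maps rotations 0/90/180/270 to immediates
  // 0/1/2/3 (value / 90); the odd form maps the permitted rotations 90 and
  // 270 to 0 and 1 ((value - 90) / 180).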
2246
2247 void print(raw_ostream &OS) const override;
2248
2249 static std::unique_ptr<AArch64Operand>
2250 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2251 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2252 Op->Tok.Data = Str.data();
2253 Op->Tok.Length = Str.size();
2254 Op->Tok.IsSuffix = IsSuffix;
2255 Op->StartLoc = S;
2256 Op->EndLoc = S;
2257 return Op;
2258 }
2259
2260 static std::unique_ptr<AArch64Operand>
2261 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2262 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2263 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2264 unsigned ShiftAmount = 0,
2265 unsigned HasExplicitAmount = false) {
2266 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2267 Op->Reg.RegNum = RegNum;
2268 Op->Reg.Kind = Kind;
2269 Op->Reg.ElementWidth = 0;
2270 Op->Reg.EqualityTy = EqTy;
2271 Op->Reg.ShiftExtend.Type = ExtTy;
2272 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2273 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2274 Op->StartLoc = S;
2275 Op->EndLoc = E;
2276 return Op;
2277 }
2278
2279 static std::unique_ptr<AArch64Operand>
2280 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2281 SMLoc S, SMLoc E, MCContext &Ctx,
2282 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2283 unsigned ShiftAmount = 0,
2284 unsigned HasExplicitAmount = false) {
2285 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2286 Kind == RegKind::SVEPredicateVector ||
2287 Kind == RegKind::SVEPredicateAsCounter) &&
2288 "Invalid vector kind");
2289 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2290 HasExplicitAmount);
2291 Op->Reg.ElementWidth = ElementWidth;
2292 return Op;
2293 }
2294
2295 static std::unique_ptr<AArch64Operand>
2296 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2297 unsigned NumElements, unsigned ElementWidth,
2298 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2299 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2300 Op->VectorList.RegNum = RegNum;
2301 Op->VectorList.Count = Count;
2302 Op->VectorList.Stride = Stride;
2303 Op->VectorList.NumElements = NumElements;
2304 Op->VectorList.ElementWidth = ElementWidth;
2305 Op->VectorList.RegisterKind = RegisterKind;
2306 Op->StartLoc = S;
2307 Op->EndLoc = E;
2308 return Op;
2309 }
2310
2311 static std::unique_ptr<AArch64Operand>
2312 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2313 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2314 Op->VectorIndex.Val = Idx;
2315 Op->StartLoc = S;
2316 Op->EndLoc = E;
2317 return Op;
2318 }
2319
2320 static std::unique_ptr<AArch64Operand>
2321 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2322 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2323 Op->MatrixTileList.RegMask = RegMask;
2324 Op->StartLoc = S;
2325 Op->EndLoc = E;
2326 return Op;
2327 }
2328
2329 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2330 const unsigned ElementWidth) {
2331 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2332 RegMap = {
2333 {{0, AArch64::ZAB0},
2334 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2335 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2336 {{8, AArch64::ZAB0},
2337 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2338 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2339 {{16, AArch64::ZAH0},
2340 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2341 {{16, AArch64::ZAH1},
2342 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2343 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2344 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2345 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2346 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2347 };
2348
2349 if (ElementWidth == 64)
2350 OutRegs.insert(Reg);
2351 else {
2352 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2353 assert(!Regs.empty() && "Invalid tile or element width!");
2354 for (auto OutReg : Regs)
2355 OutRegs.insert(OutReg);
2356 }
2357 }
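  // Example: for ElementWidth == 32, the tile za1.s (ZAS1) expands to the
  // 64-bit tiles {ZAD1, ZAD5} it overlaps, whereas a 64-bit tile is
  // inserted as-is.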
2358
2359 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2360 SMLoc E, MCContext &Ctx) {
2361 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2362 Op->Imm.Val = Val;
2363 Op->StartLoc = S;
2364 Op->EndLoc = E;
2365 return Op;
2366 }
2367
2368 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2369 unsigned ShiftAmount,
2370 SMLoc S, SMLoc E,
2371 MCContext &Ctx) {
2372 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2373 Op->ShiftedImm.Val = Val;
2374 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2375 Op->StartLoc = S;
2376 Op->EndLoc = E;
2377 return Op;
2378 }
2379
2380 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2381 unsigned Last, SMLoc S,
2382 SMLoc E,
2383 MCContext &Ctx) {
2384 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2385 Op->ImmRange.First = First;
2386 Op->ImmRange.Last = Last;
2387 Op->StartLoc = S; Op->EndLoc = E;
2388 return Op;
2389 }
2390
2391 static std::unique_ptr<AArch64Operand>
2392 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2393 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2394 Op->CondCode.Code = Code;
2395 Op->StartLoc = S;
2396 Op->EndLoc = E;
2397 return Op;
2398 }
2399
2400 static std::unique_ptr<AArch64Operand>
2401 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2403 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2404 Op->FPImm.IsExact = IsExact;
2405 Op->StartLoc = S;
2406 Op->EndLoc = S;
2407 return Op;
2408 }
2409
2410 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2411 StringRef Str,
2412 SMLoc S,
2413 MCContext &Ctx,
2414 bool HasnXSModifier) {
2415 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2416 Op->Barrier.Val = Val;
2417 Op->Barrier.Data = Str.data();
2418 Op->Barrier.Length = Str.size();
2419 Op->Barrier.HasnXSModifier = HasnXSModifier;
2420 Op->StartLoc = S;
2421 Op->EndLoc = S;
2422 return Op;
2423 }
2424
2425 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2426 uint32_t MRSReg,
2427 uint32_t MSRReg,
2428 uint32_t PStateField,
2429 MCContext &Ctx) {
2430 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2431 Op->SysReg.Data = Str.data();
2432 Op->SysReg.Length = Str.size();
2433 Op->SysReg.MRSReg = MRSReg;
2434 Op->SysReg.MSRReg = MSRReg;
2435 Op->SysReg.PStateField = PStateField;
2436 Op->StartLoc = S;
2437 Op->EndLoc = S;
2438 return Op;
2439 }
2440
2441 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2442 SMLoc E, MCContext &Ctx) {
2443 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2444 Op->SysCRImm.Val = Val;
2445 Op->StartLoc = S;
2446 Op->EndLoc = E;
2447 return Op;
2448 }
2449
2450 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2451 StringRef Str,
2452 SMLoc S,
2453 MCContext &Ctx) {
2454 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2455 Op->Prefetch.Val = Val;
2456 Op->Prefetch.Data = Str.data();
2457 Op->Prefetch.Length = Str.size();
2458 Op->StartLoc = S;
2459 Op->EndLoc = S;
2460 return Op;
2461 }
2462
2463 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2464 StringRef Str,
2465 SMLoc S,
2466 MCContext &Ctx) {
2467 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2468 Op->PSBHint.Val = Val;
2469 Op->PSBHint.Data = Str.data();
2470 Op->PSBHint.Length = Str.size();
2471 Op->StartLoc = S;
2472 Op->EndLoc = S;
2473 return Op;
2474 }
2475
2476 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2477 StringRef Str,
2478 SMLoc S,
2479 MCContext &Ctx) {
2480 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2481 Op->BTIHint.Val = Val | 32;
2482 Op->BTIHint.Data = Str.data();
2483 Op->BTIHint.Length = Str.size();
2484 Op->StartLoc = S;
2485 Op->EndLoc = S;
2486 return Op;
2487 }
2488
2489 static std::unique_ptr<AArch64Operand>
2490 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2491 SMLoc S, SMLoc E, MCContext &Ctx) {
2492 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2493 Op->MatrixReg.RegNum = RegNum;
2494 Op->MatrixReg.ElementWidth = ElementWidth;
2495 Op->MatrixReg.Kind = Kind;
2496 Op->StartLoc = S;
2497 Op->EndLoc = E;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand>
2502 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2504 Op->SVCR.PStateField = PStateField;
2505 Op->SVCR.Data = Str.data();
2506 Op->SVCR.Length = Str.size();
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand>
2513 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2514 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2515 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2516 Op->ShiftExtend.Type = ShOp;
2517 Op->ShiftExtend.Amount = Val;
2518 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2519 Op->StartLoc = S;
2520 Op->EndLoc = E;
2521 return Op;
2522 }
2523};
2524
2525} // end anonymous namespace.
2526
2527void AArch64Operand::print(raw_ostream &OS) const {
2528 switch (Kind) {
2529 case k_FPImm:
2530 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2531 if (!getFPImmIsExact())
2532 OS << " (inexact)";
2533 OS << ">";
2534 break;
2535 case k_Barrier: {
2536 StringRef Name = getBarrierName();
2537 if (!Name.empty())
2538 OS << "<barrier " << Name << ">";
2539 else
2540 OS << "<barrier invalid #" << getBarrier() << ">";
2541 break;
2542 }
2543 case k_Immediate:
2544 OS << *getImm();
2545 break;
2546 case k_ShiftedImm: {
2547 unsigned Shift = getShiftedImmShift();
2548 OS << "<shiftedimm ";
2549 OS << *getShiftedImmVal();
2550 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2551 break;
2552 }
2553 case k_ImmRange: {
2554 OS << "<immrange ";
2555 OS << getFirstImmVal();
2556 OS << ":" << getLastImmVal() << ">";
2557 break;
2558 }
2559 case k_CondCode:
2560 OS << "<condcode " << getCondCode() << ">";
2561 break;
2562 case k_VectorList: {
2563 OS << "<vectorlist ";
2564 unsigned Reg = getVectorListStart();
2565 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2566 OS << Reg + i * getVectorListStride() << " ";
2567 OS << ">";
2568 break;
2569 }
2570 case k_VectorIndex:
2571 OS << "<vectorindex " << getVectorIndex() << ">";
2572 break;
2573 case k_SysReg:
2574 OS << "<sysreg: " << getSysReg() << '>';
2575 break;
2576 case k_Token:
2577 OS << "'" << getToken() << "'";
2578 break;
2579 case k_SysCR:
2580 OS << "c" << getSysCR();
2581 break;
2582 case k_Prefetch: {
2583 StringRef Name = getPrefetchName();
2584 if (!Name.empty())
2585 OS << "<prfop " << Name << ">";
2586 else
2587 OS << "<prfop invalid #" << getPrefetch() << ">";
2588 break;
2589 }
2590 case k_PSBHint:
2591 OS << getPSBHintName();
2592 break;
2593 case k_BTIHint:
2594 OS << getBTIHintName();
2595 break;
2596 case k_MatrixRegister:
2597 OS << "<matrix " << getMatrixReg() << ">";
2598 break;
2599 case k_MatrixTileList: {
2600 OS << "<matrixlist ";
2601 unsigned RegMask = getMatrixTileListRegMask();
2602 unsigned MaxBits = 8;
2603 for (unsigned I = MaxBits; I > 0; --I)
2604 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2605 OS << '>';
2606 break;
2607 }
2608 case k_SVCR: {
2609 OS << getSVCR();
2610 break;
2611 }
2612 case k_Register:
2613 OS << "<register " << getReg() << ">";
2614 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2615 break;
2616 [[fallthrough]];
2617 case k_ShiftExtend:
2618 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2619 << getShiftExtendAmount();
2620 if (!hasShiftExtendAmount())
2621 OS << "<imp>";
2622 OS << '>';
2623 break;
2624 }
2625}
2626
2627/// @name Auto-generated Match Functions
2628/// {
2629
2630 static unsigned MatchRegisterName(StringRef Name);
2631
2632/// }
2633
2634 static unsigned MatchNeonVectorRegName(StringRef Name) {
2635 return StringSwitch<unsigned>(Name.lower())
2636 .Case("v0", AArch64::Q0)
2637 .Case("v1", AArch64::Q1)
2638 .Case("v2", AArch64::Q2)
2639 .Case("v3", AArch64::Q3)
2640 .Case("v4", AArch64::Q4)
2641 .Case("v5", AArch64::Q5)
2642 .Case("v6", AArch64::Q6)
2643 .Case("v7", AArch64::Q7)
2644 .Case("v8", AArch64::Q8)
2645 .Case("v9", AArch64::Q9)
2646 .Case("v10", AArch64::Q10)
2647 .Case("v11", AArch64::Q11)
2648 .Case("v12", AArch64::Q12)
2649 .Case("v13", AArch64::Q13)
2650 .Case("v14", AArch64::Q14)
2651 .Case("v15", AArch64::Q15)
2652 .Case("v16", AArch64::Q16)
2653 .Case("v17", AArch64::Q17)
2654 .Case("v18", AArch64::Q18)
2655 .Case("v19", AArch64::Q19)
2656 .Case("v20", AArch64::Q20)
2657 .Case("v21", AArch64::Q21)
2658 .Case("v22", AArch64::Q22)
2659 .Case("v23", AArch64::Q23)
2660 .Case("v24", AArch64::Q24)
2661 .Case("v25", AArch64::Q25)
2662 .Case("v26", AArch64::Q26)
2663 .Case("v27", AArch64::Q27)
2664 .Case("v28", AArch64::Q28)
2665 .Case("v29", AArch64::Q29)
2666 .Case("v30", AArch64::Q30)
2667 .Case("v31", AArch64::Q31)
2668 .Default(0);
2669}
2670
2671/// Returns an optional pair of (#elements, element-width) if Suffix
2672/// is a valid vector kind. Where the number of elements in a vector
2673/// or the vector width is implicit or explicitly unknown (but still a
2674/// valid suffix kind), 0 is used.
2675static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2676 RegKind VectorKind) {
2677 std::pair<int, int> Res = {-1, -1};
2678
2679 switch (VectorKind) {
2680 case RegKind::NeonVector:
2681 Res = StringSwitch<std::pair<int, int>>(Suffix)
2682 .Case("", {0, 0})
2683 .Case(".1d", {1, 64})
2684 .Case(".1q", {1, 128})
2685 // '.2h' needed for fp16 scalar pairwise reductions
2686 .Case(".2h", {2, 16})
2687 .Case(".2b", {2, 8})
2688 .Case(".2s", {2, 32})
2689 .Case(".2d", {2, 64})
2690 // '.4b' is another special case for the ARMv8.2a dot product
2691 // operand
2692 .Case(".4b", {4, 8})
2693 .Case(".4h", {4, 16})
2694 .Case(".4s", {4, 32})
2695 .Case(".8b", {8, 8})
2696 .Case(".8h", {8, 16})
2697 .Case(".16b", {16, 8})
2698 // Accept the width neutral ones, too, for verbose syntax. If
2699 // those aren't used in the right places, the token operand won't
2700 // match so all will work out.
2701 .Case(".b", {0, 8})
2702 .Case(".h", {0, 16})
2703 .Case(".s", {0, 32})
2704 .Case(".d", {0, 64})
2705 .Default({-1, -1});
2706 break;
2707 case RegKind::SVEPredicateAsCounter:
2708 case RegKind::SVEPredicateVector:
2709 case RegKind::SVEDataVector:
2710 case RegKind::Matrix:
2711 Res = StringSwitch<std::pair<int, int>>(Suffix)
2712 .Case("", {0, 0})
2713 .Case(".b", {0, 8})
2714 .Case(".h", {0, 16})
2715 .Case(".s", {0, 32})
2716 .Case(".d", {0, 64})
2717 .Case(".q", {0, 128})
2718 .Default({-1, -1});
2719 break;
2720 default:
2721 llvm_unreachable("Unsupported RegKind");
2722 }
2723
2724 if (Res == std::make_pair(-1, -1))
2725 return std::nullopt;
2726
2727 return std::optional<std::pair<int, int>>(Res);
2728}
2729
2730static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2731 return parseVectorKind(Suffix, VectorKind).has_value();
2732}
2733
2734 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2735 return StringSwitch<unsigned>(Name.lower())
2736 .Case("z0", AArch64::Z0)
2737 .Case("z1", AArch64::Z1)
2738 .Case("z2", AArch64::Z2)
2739 .Case("z3", AArch64::Z3)
2740 .Case("z4", AArch64::Z4)
2741 .Case("z5", AArch64::Z5)
2742 .Case("z6", AArch64::Z6)
2743 .Case("z7", AArch64::Z7)
2744 .Case("z8", AArch64::Z8)
2745 .Case("z9", AArch64::Z9)
2746 .Case("z10", AArch64::Z10)
2747 .Case("z11", AArch64::Z11)
2748 .Case("z12", AArch64::Z12)
2749 .Case("z13", AArch64::Z13)
2750 .Case("z14", AArch64::Z14)
2751 .Case("z15", AArch64::Z15)
2752 .Case("z16", AArch64::Z16)
2753 .Case("z17", AArch64::Z17)
2754 .Case("z18", AArch64::Z18)
2755 .Case("z19", AArch64::Z19)
2756 .Case("z20", AArch64::Z20)
2757 .Case("z21", AArch64::Z21)
2758 .Case("z22", AArch64::Z22)
2759 .Case("z23", AArch64::Z23)
2760 .Case("z24", AArch64::Z24)
2761 .Case("z25", AArch64::Z25)
2762 .Case("z26", AArch64::Z26)
2763 .Case("z27", AArch64::Z27)
2764 .Case("z28", AArch64::Z28)
2765 .Case("z29", AArch64::Z29)
2766 .Case("z30", AArch64::Z30)
2767 .Case("z31", AArch64::Z31)
2768 .Default(0);
2769}
2770
2771 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2772 return StringSwitch<unsigned>(Name.lower())
2773 .Case("p0", AArch64::P0)
2774 .Case("p1", AArch64::P1)
2775 .Case("p2", AArch64::P2)
2776 .Case("p3", AArch64::P3)
2777 .Case("p4", AArch64::P4)
2778 .Case("p5", AArch64::P5)
2779 .Case("p6", AArch64::P6)
2780 .Case("p7", AArch64::P7)
2781 .Case("p8", AArch64::P8)
2782 .Case("p9", AArch64::P9)
2783 .Case("p10", AArch64::P10)
2784 .Case("p11", AArch64::P11)
2785 .Case("p12", AArch64::P12)
2786 .Case("p13", AArch64::P13)
2787 .Case("p14", AArch64::P14)
2788 .Case("p15", AArch64::P15)
2789 .Default(0);
2790}
2791
2792 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2793 return StringSwitch<unsigned>(Name.lower())
2794 .Case("pn0", AArch64::PN0)
2795 .Case("pn1", AArch64::PN1)
2796 .Case("pn2", AArch64::PN2)
2797 .Case("pn3", AArch64::PN3)
2798 .Case("pn4", AArch64::PN4)
2799 .Case("pn5", AArch64::PN5)
2800 .Case("pn6", AArch64::PN6)
2801 .Case("pn7", AArch64::PN7)
2802 .Case("pn8", AArch64::PN8)
2803 .Case("pn9", AArch64::PN9)
2804 .Case("pn10", AArch64::PN10)
2805 .Case("pn11", AArch64::PN11)
2806 .Case("pn12", AArch64::PN12)
2807 .Case("pn13", AArch64::PN13)
2808 .Case("pn14", AArch64::PN14)
2809 .Case("pn15", AArch64::PN15)
2810 .Default(0);
2811}
2812
2813 static unsigned matchMatrixTileListRegName(StringRef Name) {
2814 return StringSwitch<unsigned>(Name.lower())
2815 .Case("za0.d", AArch64::ZAD0)
2816 .Case("za1.d", AArch64::ZAD1)
2817 .Case("za2.d", AArch64::ZAD2)
2818 .Case("za3.d", AArch64::ZAD3)
2819 .Case("za4.d", AArch64::ZAD4)
2820 .Case("za5.d", AArch64::ZAD5)
2821 .Case("za6.d", AArch64::ZAD6)
2822 .Case("za7.d", AArch64::ZAD7)
2823 .Case("za0.s", AArch64::ZAS0)
2824 .Case("za1.s", AArch64::ZAS1)
2825 .Case("za2.s", AArch64::ZAS2)
2826 .Case("za3.s", AArch64::ZAS3)
2827 .Case("za0.h", AArch64::ZAH0)
2828 .Case("za1.h", AArch64::ZAH1)
2829 .Case("za0.b", AArch64::ZAB0)
2830 .Default(0);
2831}
2832
2833 static unsigned matchMatrixRegName(StringRef Name) {
2834 return StringSwitch<unsigned>(Name.lower())
2835 .Case("za", AArch64::ZA)
2836 .Case("za0.q", AArch64::ZAQ0)
2837 .Case("za1.q", AArch64::ZAQ1)
2838 .Case("za2.q", AArch64::ZAQ2)
2839 .Case("za3.q", AArch64::ZAQ3)
2840 .Case("za4.q", AArch64::ZAQ4)
2841 .Case("za5.q", AArch64::ZAQ5)
2842 .Case("za6.q", AArch64::ZAQ6)
2843 .Case("za7.q", AArch64::ZAQ7)
2844 .Case("za8.q", AArch64::ZAQ8)
2845 .Case("za9.q", AArch64::ZAQ9)
2846 .Case("za10.q", AArch64::ZAQ10)
2847 .Case("za11.q", AArch64::ZAQ11)
2848 .Case("za12.q", AArch64::ZAQ12)
2849 .Case("za13.q", AArch64::ZAQ13)
2850 .Case("za14.q", AArch64::ZAQ14)
2851 .Case("za15.q", AArch64::ZAQ15)
2852 .Case("za0.d", AArch64::ZAD0)
2853 .Case("za1.d", AArch64::ZAD1)
2854 .Case("za2.d", AArch64::ZAD2)
2855 .Case("za3.d", AArch64::ZAD3)
2856 .Case("za4.d", AArch64::ZAD4)
2857 .Case("za5.d", AArch64::ZAD5)
2858 .Case("za6.d", AArch64::ZAD6)
2859 .Case("za7.d", AArch64::ZAD7)
2860 .Case("za0.s", AArch64::ZAS0)
2861 .Case("za1.s", AArch64::ZAS1)
2862 .Case("za2.s", AArch64::ZAS2)
2863 .Case("za3.s", AArch64::ZAS3)
2864 .Case("za0.h", AArch64::ZAH0)
2865 .Case("za1.h", AArch64::ZAH1)
2866 .Case("za0.b", AArch64::ZAB0)
2867 .Case("za0h.q", AArch64::ZAQ0)
2868 .Case("za1h.q", AArch64::ZAQ1)
2869 .Case("za2h.q", AArch64::ZAQ2)
2870 .Case("za3h.q", AArch64::ZAQ3)
2871 .Case("za4h.q", AArch64::ZAQ4)
2872 .Case("za5h.q", AArch64::ZAQ5)
2873 .Case("za6h.q", AArch64::ZAQ6)
2874 .Case("za7h.q", AArch64::ZAQ7)
2875 .Case("za8h.q", AArch64::ZAQ8)
2876 .Case("za9h.q", AArch64::ZAQ9)
2877 .Case("za10h.q", AArch64::ZAQ10)
2878 .Case("za11h.q", AArch64::ZAQ11)
2879 .Case("za12h.q", AArch64::ZAQ12)
2880 .Case("za13h.q", AArch64::ZAQ13)
2881 .Case("za14h.q", AArch64::ZAQ14)
2882 .Case("za15h.q", AArch64::ZAQ15)
2883 .Case("za0h.d", AArch64::ZAD0)
2884 .Case("za1h.d", AArch64::ZAD1)
2885 .Case("za2h.d", AArch64::ZAD2)
2886 .Case("za3h.d", AArch64::ZAD3)
2887 .Case("za4h.d", AArch64::ZAD4)
2888 .Case("za5h.d", AArch64::ZAD5)
2889 .Case("za6h.d", AArch64::ZAD6)
2890 .Case("za7h.d", AArch64::ZAD7)
2891 .Case("za0h.s", AArch64::ZAS0)
2892 .Case("za1h.s", AArch64::ZAS1)
2893 .Case("za2h.s", AArch64::ZAS2)
2894 .Case("za3h.s", AArch64::ZAS3)
2895 .Case("za0h.h", AArch64::ZAH0)
2896 .Case("za1h.h", AArch64::ZAH1)
2897 .Case("za0h.b", AArch64::ZAB0)
2898 .Case("za0v.q", AArch64::ZAQ0)
2899 .Case("za1v.q", AArch64::ZAQ1)
2900 .Case("za2v.q", AArch64::ZAQ2)
2901 .Case("za3v.q", AArch64::ZAQ3)
2902 .Case("za4v.q", AArch64::ZAQ4)
2903 .Case("za5v.q", AArch64::ZAQ5)
2904 .Case("za6v.q", AArch64::ZAQ6)
2905 .Case("za7v.q", AArch64::ZAQ7)
2906 .Case("za8v.q", AArch64::ZAQ8)
2907 .Case("za9v.q", AArch64::ZAQ9)
2908 .Case("za10v.q", AArch64::ZAQ10)
2909 .Case("za11v.q", AArch64::ZAQ11)
2910 .Case("za12v.q", AArch64::ZAQ12)
2911 .Case("za13v.q", AArch64::ZAQ13)
2912 .Case("za14v.q", AArch64::ZAQ14)
2913 .Case("za15v.q", AArch64::ZAQ15)
2914 .Case("za0v.d", AArch64::ZAD0)
2915 .Case("za1v.d", AArch64::ZAD1)
2916 .Case("za2v.d", AArch64::ZAD2)
2917 .Case("za3v.d", AArch64::ZAD3)
2918 .Case("za4v.d", AArch64::ZAD4)
2919 .Case("za5v.d", AArch64::ZAD5)
2920 .Case("za6v.d", AArch64::ZAD6)
2921 .Case("za7v.d", AArch64::ZAD7)
2922 .Case("za0v.s", AArch64::ZAS0)
2923 .Case("za1v.s", AArch64::ZAS1)
2924 .Case("za2v.s", AArch64::ZAS2)
2925 .Case("za3v.s", AArch64::ZAS3)
2926 .Case("za0v.h", AArch64::ZAH0)
2927 .Case("za1v.h", AArch64::ZAH1)
2928 .Case("za0v.b", AArch64::ZAB0)
2929 .Default(0);
2930}
2931
2932bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2933 SMLoc &EndLoc) {
2934 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2935}
2936
2937ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2938 SMLoc &EndLoc) {
2939 StartLoc = getLoc();
2940 ParseStatus Res = tryParseScalarRegister(Reg);
2941 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2942 return Res;
2943}
2944
2945// Matches a register name or register alias previously defined by '.req'
2946unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2947 RegKind Kind) {
2948 unsigned RegNum = 0;
2949 if ((RegNum = matchSVEDataVectorRegName(Name)))
2950 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2951
2952 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2953 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2954
2955 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2956 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2957
2958 if ((RegNum = MatchNeonVectorRegName(Name)))
2959 return Kind == RegKind::NeonVector ? RegNum : 0;
2960
2961 if ((RegNum = matchMatrixRegName(Name)))
2962 return Kind == RegKind::Matrix ? RegNum : 0;
2963
2964 if (Name.equals_insensitive("zt0"))
2965 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2966
2967 // The parsed register must be of RegKind Scalar
2968 if ((RegNum = MatchRegisterName(Name)))
2969 return (Kind == RegKind::Scalar) ? RegNum : 0;
2970
2971 if (!RegNum) {
2972 // Handle a few common aliases of registers.
2973 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2974 .Case("fp", AArch64::FP)
2975 .Case("lr", AArch64::LR)
2976 .Case("x31", AArch64::XZR)
2977 .Case("w31", AArch64::WZR)
2978 .Default(0))
2979 return Kind == RegKind::Scalar ? RegNum : 0;
2980
2981 // Check for aliases registered via .req. Canonicalize to lower case.
2982 // That's more consistent since register names are case insensitive, and
2983 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2984 auto Entry = RegisterReqs.find(Name.lower());
2985 if (Entry == RegisterReqs.end())
2986 return 0;
2987
2988 // set RegNum if the match is the right kind of register
2989 if (Kind == Entry->getValue().first)
2990 RegNum = Entry->getValue().second;
2991 }
2992 return RegNum;
2993}
2994
2995unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2996 switch (K) {
2997 case RegKind::Scalar:
2998 case RegKind::NeonVector:
2999 case RegKind::SVEDataVector:
3000 return 32;
3001 case RegKind::Matrix:
3002 case RegKind::SVEPredicateVector:
3003 case RegKind::SVEPredicateAsCounter:
3004 return 16;
3005 case RegKind::LookupTable:
3006 return 1;
3007 }
3008 llvm_unreachable("Unsupported RegKind");
3009}
3010
3011/// tryParseScalarRegister - Try to parse a register name. The token must be an
3012/// Identifier when called, and if it is a register name the token is eaten and
3013/// the register is added to the operand list.
3014ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3015 const AsmToken &Tok = getTok();
3016 if (Tok.isNot(AsmToken::Identifier))
3017 return ParseStatus::NoMatch;
3018
3019 std::string lowerCase = Tok.getString().lower();
3020 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3021 if (Reg == 0)
3022 return ParseStatus::NoMatch;
3023
3024 RegNum = Reg;
3025 Lex(); // Eat identifier token.
3026 return ParseStatus::Success;
3027}
3028
3029/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3030ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3031 SMLoc S = getLoc();
3032
3033 if (getTok().isNot(AsmToken::Identifier))
3034 return Error(S, "Expected cN operand where 0 <= N <= 15");
3035
3036 StringRef Tok = getTok().getIdentifier();
3037 if (Tok[0] != 'c' && Tok[0] != 'C')
3038 return Error(S, "Expected cN operand where 0 <= N <= 15");
3039
3040 uint32_t CRNum;
3041 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3042 if (BadNum || CRNum > 15)
3043 return Error(S, "Expected cN operand where 0 <= N <= 15");
3044
3045 Lex(); // Eat identifier token.
3046 Operands.push_back(
3047 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3048 return ParseStatus::Success;
3049}
3050
3051// Either an identifier for named values or a 6-bit immediate.
3052ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3053 SMLoc S = getLoc();
3054 const AsmToken &Tok = getTok();
3055
3056 unsigned MaxVal = 63;
3057
3058 // Immediate case, with optional leading hash:
3059 if (parseOptionalToken(AsmToken::Hash) ||
3060 Tok.is(AsmToken::Integer)) {
3061 const MCExpr *ImmVal;
3062 if (getParser().parseExpression(ImmVal))
3063 return ParseStatus::Failure;
3064
3065 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3066 if (!MCE)
3067 return TokError("immediate value expected for prefetch operand");
3068 unsigned prfop = MCE->getValue();
3069 if (prfop > MaxVal)
3070 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3071 "] expected");
3072
3073 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3074 Operands.push_back(AArch64Operand::CreatePrefetch(
3075 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3076 return ParseStatus::Success;
3077 }
3078
3079 if (Tok.isNot(AsmToken::Identifier))
3080 return TokError("prefetch hint expected");
3081
3082 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3083 if (!RPRFM)
3084 return TokError("prefetch hint expected");
3085
3086 Operands.push_back(AArch64Operand::CreatePrefetch(
3087 RPRFM->Encoding, Tok.getString(), S, getContext()));
3088 Lex(); // Eat identifier token.
3089 return ParseStatus::Success;
3090}
3091
3092/// tryParsePrefetch - Try to parse a prefetch operand.
3093template <bool IsSVEPrefetch>
3094ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3095 SMLoc S = getLoc();
3096 const AsmToken &Tok = getTok();
3097
3098 auto LookupByName = [](StringRef N) {
3099 if (IsSVEPrefetch) {
3100 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3101 return std::optional<unsigned>(Res->Encoding);
3102 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3103 return std::optional<unsigned>(Res->Encoding);
3104 return std::optional<unsigned>();
3105 };
3106
3107 auto LookupByEncoding = [](unsigned E) {
3108 if (IsSVEPrefetch) {
3109 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3110 return std::optional<StringRef>(Res->Name);
3111 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3112 return std::optional<StringRef>(Res->Name);
3113 return std::optional<StringRef>();
3114 };
3115 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3116
3117 // Either an identifier for named values or a 5-bit immediate.
3118 // Eat optional hash.
3119 if (parseOptionalToken(AsmToken::Hash) ||
3120 Tok.is(AsmToken::Integer)) {
3121 const MCExpr *ImmVal;
3122 if (getParser().parseExpression(ImmVal))
3123 return ParseStatus::Failure;
3124
3125 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3126 if (!MCE)
3127 return TokError("immediate value expected for prefetch operand");
3128 unsigned prfop = MCE->getValue();
3129 if (prfop > MaxVal)
3130 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3131 "] expected");
3132
3133 auto PRFM = LookupByEncoding(MCE->getValue());
3134 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3135 S, getContext()));
3136 return ParseStatus::Success;
3137 }
3138
3139 if (Tok.isNot(AsmToken::Identifier))
3140 return TokError("prefetch hint expected");
3141
3142 auto PRFM = LookupByName(Tok.getString());
3143 if (!PRFM)
3144 return TokError("prefetch hint expected");
3145
3146 Operands.push_back(AArch64Operand::CreatePrefetch(
3147 *PRFM, Tok.getString(), S, getContext()));
3148 Lex(); // Eat identifier token.
3149 return ParseStatus::Success;
3150}
3151
3152/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3153ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3154 SMLoc S = getLoc();
3155 const AsmToken &Tok = getTok();
3156 if (Tok.isNot(AsmToken::Identifier))
3157 return TokError("invalid operand for instruction");
3158
3159 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3160 if (!PSB)
3161 return TokError("invalid operand for instruction");
3162
3163 Operands.push_back(AArch64Operand::CreatePSBHint(
3164 PSB->Encoding, Tok.getString(), S, getContext()));
3165 Lex(); // Eat identifier token.
3166 return ParseStatus::Success;
3167}
3168
3169ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3170 SMLoc StartLoc = getLoc();
3171
3172 MCRegister RegNum;
3173
3174 // The case where xzr, xzr is not present is handled by an InstAlias.
3175
3176 auto RegTok = getTok(); // in case we need to backtrack
3177 if (!tryParseScalarRegister(RegNum).isSuccess())
3178 return ParseStatus::NoMatch;
3179
3180 if (RegNum != AArch64::XZR) {
3181 getLexer().UnLex(RegTok);
3182 return ParseStatus::NoMatch;
3183 }
3184
3185 if (parseComma())
3186 return ParseStatus::Failure;
3187
3188 if (!tryParseScalarRegister(RegNum).isSuccess())
3189 return TokError("expected register operand");
3190
3191 if (RegNum != AArch64::XZR)
3192 return TokError("xzr must be followed by xzr");
3193
3194 // We need to push something, since we claim this is an operand in .td.
3195 // See also AArch64AsmParser::parseKeywordOperand.
3196 Operands.push_back(AArch64Operand::CreateReg(
3197 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3198
3199 return ParseStatus::Success;
3200}
3201
3202/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3203ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3204 SMLoc S = getLoc();
3205 const AsmToken &Tok = getTok();
3206 if (Tok.isNot(AsmToken::Identifier))
3207 return TokError("invalid operand for instruction");
3208
3209 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3210 if (!BTI)
3211 return TokError("invalid operand for instruction");
3212
3213 Operands.push_back(AArch64Operand::CreateBTIHint(
3214 BTI->Encoding, Tok.getString(), S, getContext()));
3215 Lex(); // Eat identifier token.
3216 return ParseStatus::Success;
3217}
3218
3219/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3220/// instruction.
3221ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3222 SMLoc S = getLoc();
3223 const MCExpr *Expr = nullptr;
3224
3225 if (getTok().is(AsmToken::Hash)) {
3226 Lex(); // Eat hash token.
3227 }
3228
3229 if (parseSymbolicImmVal(Expr))
3230 return ParseStatus::Failure;
3231
3232 AArch64MCExpr::VariantKind ELFRefKind;
3233 MCSymbolRefExpr::VariantKind DarwinRefKind;
3234 int64_t Addend;
3235 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3236 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3237 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3238 // No modifier was specified at all; this is the syntax for an ELF basic
3239 // ADRP relocation (unfortunately).
3240 Expr =
3241 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3242 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3243 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3244 Addend != 0) {
3245 return Error(S, "gotpage label reference not allowed an addend");
3246 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3247 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3248 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3249 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3250 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3251 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3252 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3253 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3254 // The operand must be an @page or @gotpage qualified symbolref.
3255 return Error(S, "page or gotpage label reference expected");
3256 }
3257 }
3258
3259 // We have either a label reference possibly with addend or an immediate. The
3260 // addend is a raw value here. The linker will adjust it to only reference the
3261 // page.
3262 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3263 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3264
3265 return ParseStatus::Success;
3266}
3267
3268/// tryParseAdrLabel - Parse and validate a source label for the ADR
3269/// instruction.
3270ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3271 SMLoc S = getLoc();
3272 const MCExpr *Expr = nullptr;
3273
3274 // Leave anything with a bracket to the default for SVE
3275 if (getTok().is(AsmToken::LBrac))
3276 return ParseStatus::NoMatch;
3277
3278 if (getTok().is(AsmToken::Hash))
3279 Lex(); // Eat hash token.
3280
3281 if (parseSymbolicImmVal(Expr))
3282 return ParseStatus::Failure;
3283
3284 AArch64MCExpr::VariantKind ELFRefKind;
3285 MCSymbolRefExpr::VariantKind DarwinRefKind;
3286 int64_t Addend;
3287 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3288 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3289 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3290 // No modifier was specified at all; this is the syntax for an ELF basic
3291 // ADR relocation (unfortunately).
3292 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3293 } else {
3294 return Error(S, "unexpected adr label");
3295 }
3296 }
3297
3298 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3299 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3300 return ParseStatus::Success;
3301}
3302
3303/// tryParseFPImm - A floating point immediate expression operand.
3304template <bool AddFPZeroAsLiteral>
3305ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3306 SMLoc S = getLoc();
3307
3308 bool Hash = parseOptionalToken(AsmToken::Hash);
3309
3310 // Handle negation, as that still comes through as a separate token.
3311 bool isNegative = parseOptionalToken(AsmToken::Minus);
3312
3313 const AsmToken &Tok = getTok();
3314 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3315 if (!Hash)
3316 return ParseStatus::NoMatch;
3317 return TokError("invalid floating point immediate");
3318 }
3319
3320 // Parse hexadecimal representation.
3321 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3322 if (Tok.getIntVal() > 255 || isNegative)
3323 return TokError("encoded floating point value out of range");
3324
3325 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3326 Operands.push_back(
3327 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3328 } else {
3329 // Parse FP representation.
3330 APFloat RealVal(APFloat::IEEEdouble());
3331 auto StatusOrErr =
3332 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3333 if (errorToBool(StatusOrErr.takeError()))
3334 return TokError("invalid floating point representation");
3335
3336 if (isNegative)
3337 RealVal.changeSign();
3338
3339 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3340 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3341 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3342 } else
3343 Operands.push_back(AArch64Operand::CreateFPImm(
3344 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3345 }
3346
3347 Lex(); // Eat the token.
3348
3349 return ParseStatus::Success;
3350}
3351
3352/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3353/// a shift suffix, for example '#1, lsl #12'.
3354 ParseStatus
3355 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3356 SMLoc S = getLoc();
3357
3358 if (getTok().is(AsmToken::Hash))
3359 Lex(); // Eat '#'
3360 else if (getTok().isNot(AsmToken::Integer))
3361 // Operand should start from # or should be integer, emit error otherwise.
3362 return ParseStatus::NoMatch;
3363
3364 if (getTok().is(AsmToken::Integer) &&
3365 getLexer().peekTok().is(AsmToken::Colon))
3366 return tryParseImmRange(Operands);
3367
3368 const MCExpr *Imm = nullptr;
3369 if (parseSymbolicImmVal(Imm))
3370 return ParseStatus::Failure;
3371 else if (getTok().isNot(AsmToken::Comma)) {
3372 Operands.push_back(
3373 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3374 return ParseStatus::Success;
3375 }
3376
3377 // Eat ','
3378 Lex();
3379 StringRef VecGroup;
3380 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3381 Operands.push_back(
3382 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3383 Operands.push_back(
3384 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3385 return ParseStatus::Success;
3386 }
3387
3388 // The optional operand must be "lsl #N" where N is non-negative.
3389 if (!getTok().is(AsmToken::Identifier) ||
3390 !getTok().getIdentifier().equals_insensitive("lsl"))
3391 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3392
3393 // Eat 'lsl'
3394 Lex();
3395
3396 parseOptionalToken(AsmToken::Hash);
3397
3398 if (getTok().isNot(AsmToken::Integer))
3399 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3400
3401 int64_t ShiftAmount = getTok().getIntVal();
3402
3403 if (ShiftAmount < 0)
3404 return Error(getLoc(), "positive shift amount required");
3405 Lex(); // Eat the number
3406
3407 // Just in case the optional lsl #0 is used for immediates other than zero.
3408 if (ShiftAmount == 0 && Imm != nullptr) {
3409 Operands.push_back(
3410 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3411 return ParseStatus::Success;
3412 }
3413
3414 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3415 getLoc(), getContext()));
3416 return ParseStatus::Success;
3417}
3418
3419/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3420/// suggestion to help common typos.
3421 AArch64CC::CondCode
3422 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3423 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3424 .Case("eq", AArch64CC::EQ)
3425 .Case("ne", AArch64CC::NE)
3426 .Case("cs", AArch64CC::HS)
3427 .Case("hs", AArch64CC::HS)
3428 .Case("cc", AArch64CC::LO)
3429 .Case("lo", AArch64CC::LO)
3430 .Case("mi", AArch64CC::MI)
3431 .Case("pl", AArch64CC::PL)
3432 .Case("vs", AArch64CC::VS)
3433 .Case("vc", AArch64CC::VC)
3434 .Case("hi", AArch64CC::HI)
3435 .Case("ls", AArch64CC::LS)
3436 .Case("ge", AArch64CC::GE)
3437 .Case("lt", AArch64CC::LT)
3438 .Case("gt", AArch64CC::GT)
3439 .Case("le", AArch64CC::LE)
3440 .Case("al", AArch64CC::AL)
3441 .Case("nv", AArch64CC::NV)
3442 .Default(AArch64CC::Invalid);
3443
3444 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3445 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3446 .Case("none", AArch64CC::EQ)
3447 .Case("any", AArch64CC::NE)
3448 .Case("nlast", AArch64CC::HS)
3449 .Case("last", AArch64CC::LO)
3450 .Case("first", AArch64CC::MI)
3451 .Case("nfrst", AArch64CC::PL)
3452 .Case("pmore", AArch64CC::HI)
3453 .Case("plast", AArch64CC::LS)
3454 .Case("tcont", AArch64CC::GE)
3455 .Case("tstop", AArch64CC::LT)
3456 .Default(AArch64CC::Invalid);
3457
3458 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3459 Suggestion = "nfrst";
3460 }
3461 return CC;
3462}
3463
3464/// parseCondCode - Parse a Condition Code operand.
3465bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3466 bool invertCondCode) {
3467 SMLoc S = getLoc();
3468 const AsmToken &Tok = getTok();
3469 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3470
3471 StringRef Cond = Tok.getString();
3472 std::string Suggestion;
3473 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3474 if (CC == AArch64CC::Invalid) {
3475 std::string Msg = "invalid condition code";
3476 if (!Suggestion.empty())
3477 Msg += ", did you mean " + Suggestion + "?";
3478 return TokError(Msg);
3479 }
3480 Lex(); // Eat identifier token.
3481
3482 if (invertCondCode) {
3483 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3484 return TokError("condition codes AL and NV are invalid for this instruction");
3485 CC = AArch64CC::getInvertedCondCode(CC);
3486 }
3487
3488 Operands.push_back(
3489 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3490 return false;
3491}
3492
3493ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3494 const AsmToken &Tok = getTok();
3495 SMLoc S = getLoc();
3496
3497 if (Tok.isNot(AsmToken::Identifier))
3498 return TokError("invalid operand for instruction");
3499
3500 unsigned PStateImm = -1;
3501 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3502 if (!SVCR)
3503 return ParseStatus::NoMatch;
3504 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3505 PStateImm = SVCR->Encoding;
3506
3507 Operands.push_back(
3508 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3509 Lex(); // Eat identifier token.
3510 return ParseStatus::Success;
3511}
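// Illustrative example (not part of the upstream source): this matches the
// named SVCR operands of the SME MSR forms, e.g. "msr svcrsm, #1" or
// "msr svcrsmza, #0"; the operand carries the SVCR encoding when the
// required features are present.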
3512
3513ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3514 const AsmToken &Tok = getTok();
3515 SMLoc S = getLoc();
3516
3517 StringRef Name = Tok.getString();
3518
3519 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3520 Lex(); // eat "za[.(b|h|s|d)]"
3521 unsigned ElementWidth = 0;
3522 auto DotPosition = Name.find('.');
3523 if (DotPosition != StringRef::npos) {
3524 const auto &KindRes =
3525 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3526 if (!KindRes)
3527 return TokError(
3528 "Expected the register to be followed by element width suffix");
3529 ElementWidth = KindRes->second;
3530 }
3531 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3532 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3533 getContext()));
3534 if (getLexer().is(AsmToken::LBrac)) {
3535 // There's no comma after a matrix operand, so we can parse the next operand
3536 // immediately.
3537 if (parseOperand(Operands, false, false))
3538 return ParseStatus::NoMatch;
3539 }
3540 return ParseStatus::Success;
3541 }
3542
3543 // Try to parse a matrix register.
3544 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3545 if (!Reg)
3546 return ParseStatus::NoMatch;
3547
3548 size_t DotPosition = Name.find('.');
3549 assert(DotPosition != StringRef::npos && "Unexpected register");
3550
3551 StringRef Head = Name.take_front(DotPosition);
3552 StringRef Tail = Name.drop_front(DotPosition);
3553 StringRef RowOrColumn = Head.take_back();
3554
3555 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3556 .Case("h", MatrixKind::Row)
3557 .Case("v", MatrixKind::Col)
3558 .Default(MatrixKind::Tile);
3559
3560 // Next up, parse the element width suffix.
3561 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3562 if (!KindRes)
3563 return TokError(
3564 "Expected the register to be followed by element width suffix");
3565 unsigned ElementWidth = KindRes->second;
3566
3567 Lex();
3568
3569 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3570 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3571
3572 if (getLexer().is(AsmToken::LBrac)) {
3573 // There's no comma after a matrix operand, so we can parse the next operand
3574 // immediately.
3575 if (parseOperand(Operands, false, false))
3576 return ParseStatus::NoMatch;
3577 }
3578 return ParseStatus::Success;
3579}
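// Illustrative examples (not part of the upstream source): "za" and "za.d"
// parse as the whole array (MatrixKind::Array), while for tile names the
// letter before the dot selects the kind: "za0h.s" is a Row, "za0v.s" a
// Col, and a bare "za3.d" a Tile.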
3580
3581/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
3582/// argument. Parse them if present.
3583ParseStatus
3584AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3585 const AsmToken &Tok = getTok();
3586 std::string LowerID = Tok.getString().lower();
3587 AArch64_AM::ShiftExtendType ShOp =
3588 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3589 .Case("lsl", AArch64_AM::LSL)
3590 .Case("lsr", AArch64_AM::LSR)
3591 .Case("asr", AArch64_AM::ASR)
3592 .Case("ror", AArch64_AM::ROR)
3593 .Case("msl", AArch64_AM::MSL)
3594 .Case("uxtb", AArch64_AM::UXTB)
3595 .Case("uxth", AArch64_AM::UXTH)
3596 .Case("uxtw", AArch64_AM::UXTW)
3597 .Case("uxtx", AArch64_AM::UXTX)
3598 .Case("sxtb", AArch64_AM::SXTB)
3599 .Case("sxth", AArch64_AM::SXTH)
3600 .Case("sxtw", AArch64_AM::SXTW)
3601 .Case("sxtx", AArch64_AM::SXTX)
3602 .Default(AArch64_AM::InvalidShiftExtend);
3603
3604 if (ShOp == AArch64_AM::InvalidShiftExtend)
3605 return ParseStatus::NoMatch;
3606
3607 SMLoc S = Tok.getLoc();
3608 Lex();
3609
3610 bool Hash = parseOptionalToken(AsmToken::Hash);
3611
3612 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3613 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3614 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3615 ShOp == AArch64_AM::MSL) {
3616 // We expect a number here.
3617 return TokError("expected #imm after shift specifier");
3618 }
3619
3620 // "extend" type operations don't need an immediate, #0 is implicit.
3621 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3622 Operands.push_back(
3623 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3624 return ParseStatus::Success;
3625 }
3626
3627 // Make sure we actually have a number, an identifier, or a parenthesized
3628 // expression.
3629 SMLoc E = getLoc();
3630 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3631 !getTok().is(AsmToken::Identifier))
3632 return Error(E, "expected integer shift amount");
3633
3634 const MCExpr *ImmVal;
3635 if (getParser().parseExpression(ImmVal))
3636 return ParseStatus::Failure;
3637
3638 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3639 if (!MCE)
3640 return Error(E, "expected constant '#imm' after shift specifier");
3641
3642 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3643 Operands.push_back(AArch64Operand::CreateShiftExtend(
3644 ShOp, MCE->getValue(), true, S, E, getContext()));
3645 return ParseStatus::Success;
3646}
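// Illustrative examples (not part of the upstream source):
//   add x0, x1, x2, lsl #4   // shift form: the immediate is mandatory
//   add x0, x1, w2, uxtw     // extend form: a missing amount implies #0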
3647
3648static const struct Extension {
3649 const char *Name;
3650 const FeatureBitset Features;
3651} ExtensionMap[] = {
3652 {"crc", {AArch64::FeatureCRC}},
3653 {"sm4", {AArch64::FeatureSM4}},
3654 {"sha3", {AArch64::FeatureSHA3}},
3655 {"sha2", {AArch64::FeatureSHA2}},
3656 {"aes", {AArch64::FeatureAES}},
3657 {"crypto", {AArch64::FeatureCrypto}},
3658 {"fp", {AArch64::FeatureFPARMv8}},
3659 {"simd", {AArch64::FeatureNEON}},
3660 {"ras", {AArch64::FeatureRAS}},
3661 {"rasv2", {AArch64::FeatureRASv2}},
3662 {"lse", {AArch64::FeatureLSE}},
3663 {"predres", {AArch64::FeaturePredRes}},
3664 {"predres2", {AArch64::FeatureSPECRES2}},
3665 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3666 {"mte", {AArch64::FeatureMTE}},
3667 {"memtag", {AArch64::FeatureMTE}},
3668 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3669 {"pan", {AArch64::FeaturePAN}},
3670 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3671 {"ccpp", {AArch64::FeatureCCPP}},
3672 {"rcpc", {AArch64::FeatureRCPC}},
3673 {"rng", {AArch64::FeatureRandGen}},
3674 {"sve", {AArch64::FeatureSVE}},
3675 {"sve2", {AArch64::FeatureSVE2}},
3676 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3677 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3678 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3679 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3680 {"sve2p1", {AArch64::FeatureSVE2p1}},
3681 {"b16b16", {AArch64::FeatureB16B16}},
3682 {"ls64", {AArch64::FeatureLS64}},
3683 {"xs", {AArch64::FeatureXS}},
3684 {"pauth", {AArch64::FeaturePAuth}},
3685 {"flagm", {AArch64::FeatureFlagM}},
3686 {"rme", {AArch64::FeatureRME}},
3687 {"sme", {AArch64::FeatureSME}},
3688 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3689 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3690 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3691 {"sme2", {AArch64::FeatureSME2}},
3692 {"sme2p1", {AArch64::FeatureSME2p1}},
3693 {"hbc", {AArch64::FeatureHBC}},
3694 {"mops", {AArch64::FeatureMOPS}},
3695 {"mec", {AArch64::FeatureMEC}},
3696 {"the", {AArch64::FeatureTHE}},
3697 {"d128", {AArch64::FeatureD128}},
3698 {"lse128", {AArch64::FeatureLSE128}},
3699 {"ite", {AArch64::FeatureITE}},
3700 {"cssc", {AArch64::FeatureCSSC}},
3701 {"rcpc3", {AArch64::FeatureRCPC3}},
3702 {"gcs", {AArch64::FeatureGCS}},
3703 {"bf16", {AArch64::FeatureBF16}},
3704 {"compnum", {AArch64::FeatureComplxNum}},
3705 {"dotprod", {AArch64::FeatureDotProd}},
3706 {"f32mm", {AArch64::FeatureMatMulFP32}},
3707 {"f64mm", {AArch64::FeatureMatMulFP64}},
3708 {"fp16", {AArch64::FeatureFullFP16}},
3709 {"fp16fml", {AArch64::FeatureFP16FML}},
3710 {"i8mm", {AArch64::FeatureMatMulInt8}},
3711 {"lor", {AArch64::FeatureLOR}},
3712 {"profile", {AArch64::FeatureSPE}},
3713 // "rdma" is the name documented by binutils for the feature, but
3714 // binutils also accepts incomplete prefixes of features, so "rdm"
3715 // works too. Support both spellings here.
3716 {"rdm", {AArch64::FeatureRDM}},
3717 {"rdma", {AArch64::FeatureRDM}},
3718 {"sb", {AArch64::FeatureSB}},
3719 {"ssbs", {AArch64::FeatureSSBS}},
3720 {"tme", {AArch64::FeatureTME}},
3721 {"fpmr", {AArch64::FeatureFPMR}},
3722 {"fp8", {AArch64::FeatureFP8}},
3723 {"faminmax", {AArch64::FeatureFAMINMAX}},
3724 {"fp8fma", {AArch64::FeatureFP8FMA}},
3725 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3726 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3727 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3728 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3729 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3730 {"lut", {AArch64::FeatureLUT}},
3731 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3732 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3733 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3734 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3735 {"cpa", {AArch64::FeatureCPA}},
3736 {"tlbiw", {AArch64::FeatureTLBIW}},
3737};
3738
3739static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3740 if (FBS[AArch64::HasV8_0aOps])
3741 Str += "ARMv8a";
3742 if (FBS[AArch64::HasV8_1aOps])
3743 Str += "ARMv8.1a";
3744 else if (FBS[AArch64::HasV8_2aOps])
3745 Str += "ARMv8.2a";
3746 else if (FBS[AArch64::HasV8_3aOps])
3747 Str += "ARMv8.3a";
3748 else if (FBS[AArch64::HasV8_4aOps])
3749 Str += "ARMv8.4a";
3750 else if (FBS[AArch64::HasV8_5aOps])
3751 Str += "ARMv8.5a";
3752 else if (FBS[AArch64::HasV8_6aOps])
3753 Str += "ARMv8.6a";
3754 else if (FBS[AArch64::HasV8_7aOps])
3755 Str += "ARMv8.7a";
3756 else if (FBS[AArch64::HasV8_8aOps])
3757 Str += "ARMv8.8a";
3758 else if (FBS[AArch64::HasV8_9aOps])
3759 Str += "ARMv8.9a";
3760 else if (FBS[AArch64::HasV9_0aOps])
3761 Str += "ARMv9-a";
3762 else if (FBS[AArch64::HasV9_1aOps])
3763 Str += "ARMv9.1a";
3764 else if (FBS[AArch64::HasV9_2aOps])
3765 Str += "ARMv9.2a";
3766 else if (FBS[AArch64::HasV9_3aOps])
3767 Str += "ARMv9.3a";
3768 else if (FBS[AArch64::HasV9_4aOps])
3769 Str += "ARMv9.4a";
3770 else if (FBS[AArch64::HasV9_5aOps])
3771 Str += "ARMv9.5a";
3772 else if (FBS[AArch64::HasV8_0rOps])
3773 Str += "ARMv8r";
3774 else {
3775 SmallVector<std::string, 2> ExtMatches;
3776 for (const auto& Ext : ExtensionMap) {
3777 // Use & in case multiple features are enabled
3778 if ((FBS & Ext.Features) != FeatureBitset())
3779 ExtMatches.push_back(Ext.Name);
3780 }
3781 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3782 }
3783}
3784
3785void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3786 SMLoc S) {
3787 const uint16_t Op2 = Encoding & 7;
3788 const uint16_t Cm = (Encoding & 0x78) >> 3;
3789 const uint16_t Cn = (Encoding & 0x780) >> 7;
3790 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3791
3792 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3793
3794 Operands.push_back(
3795 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3796 Operands.push_back(
3797 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3798 Operands.push_back(
3799 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3800 Expr = MCConstantExpr::create(Op2, getContext());
3801 Operands.push_back(
3802 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3803}
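// Worked example (illustrative, not part of the upstream source): Encoding
// packs op1/Cn/Cm/op2 into bits [13:11]/[10:7]/[6:3]/[2:0]. For "ic iallu"
// (op1=0, Cn=7, Cm=5, op2=0) that is (7 << 7) | (5 << 3) = 0x3A8, which
// expands to "sys #0, c7, c5, #0".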
3804
3805/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3806/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3807bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3808 OperandVector &Operands) {
3809 if (Name.contains('.'))
3810 return TokError("invalid operand");
3811
3812 Mnemonic = Name;
3813 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3814
3815 const AsmToken &Tok = getTok();
3816 StringRef Op = Tok.getString();
3817 SMLoc S = Tok.getLoc();
3818
3819 if (Mnemonic == "ic") {
3820 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3821 if (!IC)
3822 return TokError("invalid operand for IC instruction");
3823 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3824 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3825 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3826 return TokError(Str);
3827 }
3828 createSysAlias(IC->Encoding, Operands, S);
3829 } else if (Mnemonic == "dc") {
3830 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3831 if (!DC)
3832 return TokError("invalid operand for DC instruction");
3833 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3834 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3835 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3836 return TokError(Str);
3837 }
3838 createSysAlias(DC->Encoding, Operands, S);
3839 } else if (Mnemonic == "at") {
3840 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3841 if (!AT)
3842 return TokError("invalid operand for AT instruction");
3843 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3844 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3845 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3846 return TokError(Str);
3847 }
3848 createSysAlias(AT->Encoding, Operands, S);
3849 } else if (Mnemonic == "tlbi") {
3850 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3851 if (!TLBI)
3852 return TokError("invalid operand for TLBI instruction");
3853 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3854 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3855 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3856 return TokError(Str);
3857 }
3858 createSysAlias(TLBI->Encoding, Operands, S);
3859 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3860
3861 if (Op.lower() != "rctx")
3862 return TokError("invalid operand for prediction restriction instruction");
3863
3864 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3865 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3866 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3867
3868 if (Mnemonic == "cosp" && !hasSpecres2)
3869 return TokError("COSP requires: predres2");
3870 if (!hasPredres)
3871 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3872
3873 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3874 : Mnemonic == "dvp" ? 0b101
3875 : Mnemonic == "cosp" ? 0b110
3876 : Mnemonic == "cpp" ? 0b111
3877 : 0;
3878 assert(PRCTX_Op2 &&
3879 "Invalid mnemonic for prediction restriction instruction");
3880 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3881 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3882
3883 createSysAlias(Encoding, Operands, S);
3884 }
3885
3886 Lex(); // Eat operand.
3887
3888 bool ExpectRegister = !Op.contains_insensitive("all");
3889 bool HasRegister = false;
3890
3891 // Check for the optional register operand.
3892 if (parseOptionalToken(AsmToken::Comma)) {
3893 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3894 return TokError("expected register operand");
3895 HasRegister = true;
3896 }
3897
3898 if (ExpectRegister && !HasRegister)
3899 return TokError("specified " + Mnemonic + " op requires a register");
3900 else if (!ExpectRegister && HasRegister)
3901 return TokError("specified " + Mnemonic + " op does not use a register");
3902
3903 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3904 return true;
3905
3906 return false;
3907}
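// Illustrative examples (not part of the upstream source):
//   dc zva, x0   // expands to: sys #3, c7, c4, #1, x0
//   ic ialluis   // an "all" op takes no register: sys #0, c7, c1, #0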
3908
3909/// parseSyspAlias - The TLBIP instructions are simple aliases for
3910/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3911bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3912 OperandVector &Operands) {
3913 if (Name.contains('.'))
3914 return TokError("invalid operand");
3915
3916 Mnemonic = Name;
3917 Operands.push_back(
3918 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3919
3920 const AsmToken &Tok = getTok();
3921 StringRef Op = Tok.getString();
3922 SMLoc S = Tok.getLoc();
3923
3924 if (Mnemonic == "tlbip") {
3925 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3926 if (HasnXSQualifier) {
3927 Op = Op.drop_back(3);
3928 }
3929 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3930 if (!TLBIorig)
3931 return TokError("invalid operand for TLBIP instruction");
3932 const AArch64TLBI::TLBI TLBI(
3933 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3934 TLBIorig->NeedsReg,
3935 HasnXSQualifier
3936 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3937 : TLBIorig->FeaturesRequired);
3938 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3939 std::string Name =
3940 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3941 std::string Str("TLBIP " + Name + " requires: ");
3942 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
3943 return TokError(Str);
3944 }
3945 createSysAlias(TLBI.Encoding, Operands, S);
3946 }
3947
3948 Lex(); // Eat operand.
3949
3950 if (parseComma())
3951 return true;
3952
3953 if (Tok.isNot(AsmToken::Identifier))
3954 return TokError("expected register identifier");
3955 auto Result = tryParseSyspXzrPair(Operands);
3956 if (Result.isNoMatch())
3957 Result = tryParseGPRSeqPair(Operands);
3958 if (!Result.isSuccess())
3959 return TokError("specified " + Mnemonic +
3960 " op requires a pair of registers");
3961
3962 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3963 return true;
3964
3965 return false;
3966}
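// Illustrative example (not part of the upstream source): "tlbip vae1, x0, x1"
// takes a sequential GPR pair (or the XZR pair); an "nXS"-suffixed operand
// sets bit 7 of the encoding and additionally requires the +xs feature.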
3967
3968ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3969 MCAsmParser &Parser = getParser();
3970 const AsmToken &Tok = getTok();
3971
3972 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
3973 return TokError("'csync' operand expected");
3974 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3975 // Immediate operand.
3976 const MCExpr *ImmVal;
3977 SMLoc ExprLoc = getLoc();
3978 AsmToken IntTok = Tok;
3979 if (getParser().parseExpression(ImmVal))
3980 return ParseStatus::Failure;
3981 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3982 if (!MCE)
3983 return Error(ExprLoc, "immediate value expected for barrier operand");
3984 int64_t Value = MCE->getValue();
3985 if (Mnemonic == "dsb" && Value > 15) {
3986 // This case is a no match here, but it might be matched by the nXS
3987 // variant. We deliberately do not unlex the optional '#', as it is not
3988 // needed to characterize an integer immediate.
3989 Parser.getLexer().UnLex(IntTok);
3990 return ParseStatus::NoMatch;
3991 }
3992 if (Value < 0 || Value > 15)
3993 return Error(ExprLoc, "barrier operand out of range");
3994 auto DB = AArch64DB::lookupDBByEncoding(Value);
3995 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3996 ExprLoc, getContext(),
3997 false /*hasnXSModifier*/));
3998 return ParseStatus::Success;
3999 }
4000
4001 if (Tok.isNot(AsmToken::Identifier))
4002 return TokError("invalid operand for instruction");
4003
4004 StringRef Operand = Tok.getString();
4005 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4006 auto DB = AArch64DB::lookupDBByName(Operand);
4007 // The only valid named option for ISB is 'sy'
4008 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4009 return TokError("'sy' or #imm operand expected");
4010 // The only valid named option for TSB is 'csync'
4011 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4012 return TokError("'csync' operand expected");
4013 if (!DB && !TSB) {
4014 if (Mnemonic == "dsb") {
4015 // This case is a no match here, but it might be matched by the nXS
4016 // variant.
4017 return ParseStatus::NoMatch;
4018 }
4019 return TokError("invalid barrier option name");
4020 }
4021
4022 Operands.push_back(AArch64Operand::CreateBarrier(
4023 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4024 getContext(), false /*hasnXSModifier*/));
4025 Lex(); // Consume the option
4026
4027 return ParseStatus::Success;
4028}
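// Illustrative examples (not part of the upstream source): "dsb sy" and
// "dsb #15" both encode 15 here; "isb" accepts "sy" as its only named
// option; a DSB immediate above 15 is left unmatched so the nXS variant
// below can claim it.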
4029
4030ParseStatus
4031AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4032 const AsmToken &Tok = getTok();
4033
4034 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4035 if (Mnemonic != "dsb")
4036 return ParseStatus::Failure;
4037
4038 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4039 // Immediate operand.
4040 const MCExpr *ImmVal;
4041 SMLoc ExprLoc = getLoc();
4042 if (getParser().parseExpression(ImmVal))
4043 return ParseStatus::Failure;
4044 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4045 if (!MCE)
4046 return Error(ExprLoc, "immediate value expected for barrier operand");
4047 int64_t Value = MCE->getValue();
4048 // v8.7-A DSB in the nXS variant accepts only the following immediate
4049 // values: 16, 20, 24, 28.
4050 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4051 return Error(ExprLoc, "barrier operand out of range");
4052 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4053 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4054 ExprLoc, getContext(),
4055 true /*hasnXSModifier*/));
4056 return ParseStatus::Success;
4057 }
4058
4059 if (Tok.isNot(AsmToken::Identifier))
4060 return TokError("invalid operand for instruction");
4061
4062 StringRef Operand = Tok.getString();
4063 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4064
4065 if (!DB)
4066 return TokError("invalid barrier option name");
4067
4068 Operands.push_back(
4069 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4070 getContext(), true /*hasnXSModifier*/));
4071 Lex(); // Consume the option
4072
4073 return ParseStatus::Success;
4074}
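// Illustrative example (not part of the upstream source): "dsb synxs" and
// the immediate form "dsb #28" name the same nXS barrier; only #16, #20,
// #24 and #28 are accepted here.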
4075
4076ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4077 const AsmToken &Tok = getTok();
4078
4079 if (Tok.isNot(AsmToken::Identifier))
4080 return ParseStatus::NoMatch;
4081
4082 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4083 return ParseStatus::NoMatch;
4084
4085 int MRSReg, MSRReg;
4086 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4087 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4088 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4089 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4090 } else
4091 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4092
4093 unsigned PStateImm = -1;
4094 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4095 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4096 PStateImm = PState15->Encoding;
4097 if (!PState15) {
4098 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4099 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4100 PStateImm = PState1->Encoding;
4101 }
4102
4103 Operands.push_back(
4104 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4105 PStateImm, getContext()));
4106 Lex(); // Eat identifier
4107
4108 return ParseStatus::Success;
4109}
4110
4111/// tryParseNeonVectorRegister - Parse a vector register operand.
4112bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4113 if (getTok().isNot(AsmToken::Identifier))
4114 return true;
4115
4116 SMLoc S = getLoc();
4117 // Check for a vector register specifier first.
4118 StringRef Kind;
4119 MCRegister Reg;
4120 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4121 if (!Res.isSuccess())
4122 return true;
4123
4124 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4125 if (!KindRes)
4126 return true;
4127
4128 unsigned ElementWidth = KindRes->second;
4129 Operands.push_back(
4130 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4131 S, getLoc(), getContext()));
4132
4133 // If there was an explicit qualifier, that goes on as a literal text
4134 // operand.
4135 if (!Kind.empty())
4136 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4137
4138 return tryParseVectorIndex(Operands).isFailure();
4139}
4140
4141ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4142 SMLoc SIdx = getLoc();
4143 if (parseOptionalToken(AsmToken::LBrac)) {
4144 const MCExpr *ImmVal;
4145 if (getParser().parseExpression(ImmVal))
4146 return ParseStatus::NoMatch;
4147 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4148 if (!MCE)
4149 return TokError("immediate value expected for vector index");
4150
4151 SMLoc E = getLoc();
4152
4153 if (parseToken(AsmToken::RBrac, "']' expected"))
4154 return ParseStatus::Failure;
4155
4156 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4157 E, getContext()));
4158 return ParseStatus::Success;
4159 }
4160
4161 return ParseStatus::NoMatch;
4162}
4163
4164// tryParseVectorRegister - Try to parse a vector register name with
4165// optional kind specifier. If it is a register specifier, eat the token
4166// and return it.
4167ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4168 StringRef &Kind,
4169 RegKind MatchKind) {
4170 const AsmToken &Tok = getTok();
4171
4172 if (Tok.isNot(AsmToken::Identifier))
4173 return ParseStatus::NoMatch;
4174
4175 StringRef Name = Tok.getString();
4176 // If there is a kind specifier, it's separated from the register name by
4177 // a '.'.
4178 size_t Start = 0, Next = Name.find('.');
4179 StringRef Head = Name.slice(Start, Next);
4180 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4181
4182 if (RegNum) {
4183 if (Next != StringRef::npos) {
4184 Kind = Name.slice(Next, StringRef::npos);
4185 if (!isValidVectorKind(Kind, MatchKind))
4186 return TokError("invalid vector kind qualifier");
4187 }
4188 Lex(); // Eat the register token.
4189
4190 Reg = RegNum;
4191 return ParseStatus::Success;
4192 }
4193
4194 return ParseStatus::NoMatch;
4195}
4196
4197ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4198 OperandVector &Operands) {
4199 ParseStatus Status =
4200 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4201 if (!Status.isSuccess())
4202 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4203 return Status;
4204}
4205
4206/// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
4207template <RegKind RK>
4208ParseStatus
4209AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4210 // Check for an SVE predicate register specifier first.
4211 const SMLoc S = getLoc();
4212 StringRef Kind;
4213 MCRegister RegNum;
4214 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4215 if (!Res.isSuccess())
4216 return Res;
4217
4218 const auto &KindRes = parseVectorKind(Kind, RK);
4219 if (!KindRes)
4220 return ParseStatus::NoMatch;
4221
4222 unsigned ElementWidth = KindRes->second;
4223 Operands.push_back(AArch64Operand::CreateVectorReg(
4224 RegNum, RK, ElementWidth, S,
4225 getLoc(), getContext()));
4226
4227 if (getLexer().is(AsmToken::LBrac)) {
4228 if (RK == RegKind::SVEPredicateAsCounter) {
4229 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4230 if (ResIndex.isSuccess())
4231 return ParseStatus::Success;
4232 } else {
4233 // Indexed predicate; there's no comma, so try parsing the next operand
4234 // immediately.
4235 if (parseOperand(Operands, false, false))
4236 return ParseStatus::NoMatch;
4237 }
4238 }
4239
4240 // Not all predicates are followed by a '/m' or '/z'.
4241 if (getTok().isNot(AsmToken::Slash))
4242 return ParseStatus::Success;
4243
4244 // But when they do they shouldn't have an element type suffix.
4245 if (!Kind.empty())
4246 return Error(S, "not expecting size suffix");
4247
4248 // Add a literal slash as an operand
4249 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4250
4251 Lex(); // Eat the slash.
4252
4253 // Zeroing or merging?
4254 auto Pred = getTok().getString().lower();
4255 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4256 return Error(getLoc(), "expecting 'z' predication");
4257
4258 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4259 return Error(getLoc(), "expecting 'm' or 'z' predication");
4260
4261 // Add zero/merge token.
4262 const char *ZM = Pred == "z" ? "z" : "m";
4263 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4264
4265 Lex(); // Eat zero/merge token.
4266 return ParseStatus::Success;
4267}
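// Illustrative examples (not part of the upstream source): "p0/z" and
// "p0/m" parse as predicate plus qualifier, a size-suffixed "p0.b/z" is
// rejected with "not expecting size suffix", and a predicate-as-counter
// register permits only "/z" predication.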
4268
4269/// parseRegister - Parse a register operand.
4270bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4271 // Try for a Neon vector register.
4272 if (!tryParseNeonVectorRegister(Operands))
4273 return false;
4274
4275 if (tryParseZTOperand(Operands).isSuccess())
4276 return false;
4277
4278 // Otherwise try for a scalar register.
4279 if (tryParseGPROperand<false>(Operands).isSuccess())
4280 return false;
4281
4282 return true;
4283}
4284
4285bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4286 bool HasELFModifier = false;
4287 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4288
4289 if (parseOptionalToken(AsmToken::Colon)) {
4290 HasELFModifier = true;
4291
4292 if (getTok().isNot(AsmToken::Identifier))
4293 return TokError("expect relocation specifier in operand after ':'");
4294
4295 std::string LowerCase = getTok().getIdentifier().lower();
4296 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4297 .Case("lo12", AArch64MCExpr::VK_LO12)
4298 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4299 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4300 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4301 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4302 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4303 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4304 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4305 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4306 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4307 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4308 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4309 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4310 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4311 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4312 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4313 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4314 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4315 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4316 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4317 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4318 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4319 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4320 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4321 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4322 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4323 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4324 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4325 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4326 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4327 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4328 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4329 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4330 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4331 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4332 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4333 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4334 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4335 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4336 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4337 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4338 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4339 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4340 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4341 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4342 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4343 .Default(AArch64MCExpr::VK_INVALID);
4344
4345 if (RefKind == AArch64MCExpr::VK_INVALID)
4346 return TokError("expect relocation specifier in operand after ':'");
4347
4348 Lex(); // Eat identifier
4349
4350 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4351 return true;
4352 }
4353
4354 if (getParser().parseExpression(ImmVal))
4355 return true;
4356
4357 if (HasELFModifier)
4358 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4359
4360 return false;
4361}
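// Illustrative example (not part of the upstream source): in
// "add x0, x0, :lo12:sym" the ":lo12:" specifier is parsed first and the
// following expression is then wrapped in an AArch64MCExpr with the
// corresponding VariantKind.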
4362
4363ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4364 if (getTok().isNot(AsmToken::LCurly))
4365 return ParseStatus::NoMatch;
4366
4367 auto ParseMatrixTile = [this](unsigned &Reg,
4368 unsigned &ElementWidth) -> ParseStatus {
4369 StringRef Name = getTok().getString();
4370 size_t DotPosition = Name.find('.');
4371 if (DotPosition == StringRef::npos)
4372 return ParseStatus::NoMatch;
4373
4374 unsigned RegNum = matchMatrixTileListRegName(Name);
4375 if (!RegNum)
4376 return ParseStatus::NoMatch;
4377
4378 StringRef Tail = Name.drop_front(DotPosition);
4379 const std::optional<std::pair<int, int>> &KindRes =
4380 parseVectorKind(Tail, RegKind::Matrix);
4381 if (!KindRes)
4382 return TokError(
4383 "Expected the register to be followed by element width suffix");
4384 ElementWidth = KindRes->second;
4385 Reg = RegNum;
4386 Lex(); // Eat the register.
4387 return ParseStatus::Success;
4388 };
4389
4390 SMLoc S = getLoc();
4391 auto LCurly = getTok();
4392 Lex(); // Eat left bracket token.
4393
4394 // Empty matrix list
4395 if (parseOptionalToken(AsmToken::RCurly)) {
4396 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4397 /*RegMask=*/0, S, getLoc(), getContext()));
4398 return ParseStatus::Success;
4399 }
4400
4401 // Try parse {za} alias early
4402 if (getTok().getString().equals_insensitive("za")) {
4403 Lex(); // Eat 'za'
4404
4405 if (parseToken(AsmToken::RCurly, "'}' expected"))
4406 return ParseStatus::Failure;
4407
4408 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4409 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4410 return ParseStatus::Success;
4411 }
4412
4413 SMLoc TileLoc = getLoc();
4414
4415 unsigned FirstReg, ElementWidth;
4416 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4417 if (!ParseRes.isSuccess()) {
4418 getLexer().UnLex(LCurly);
4419 return ParseRes;
4420 }
4421
4422 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4423
4424 unsigned PrevReg = FirstReg;
4425
4426 SmallSet<unsigned, 8> DRegs;
4427 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4428
4429 SmallSet<unsigned, 8> SeenRegs;
4430 SeenRegs.insert(FirstReg);
4431
4432 while (parseOptionalToken(AsmToken::Comma)) {
4433 TileLoc = getLoc();
4434 unsigned Reg, NextElementWidth;
4435 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4436 if (!ParseRes.isSuccess())
4437 return ParseRes;
4438
4439 // Element size must match on all regs in the list.
4440 if (ElementWidth != NextElementWidth)
4441 return Error(TileLoc, "mismatched register size suffix");
4442
4443 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4444 Warning(TileLoc, "tile list not in ascending order");
4445
4446 if (SeenRegs.contains(Reg))
4447 Warning(TileLoc, "duplicate tile in list");
4448 else {
4449 SeenRegs.insert(Reg);
4450 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4451 }
4452
4453 PrevReg = Reg;
4454 }
4455
4456 if (parseToken(AsmToken::RCurly, "'}' expected"))
4457 return ParseStatus::Failure;
4458
4459 unsigned RegMask = 0;
4460 for (auto Reg : DRegs)
4461 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4462 RI->getEncodingValue(AArch64::ZAD0));
4463 Operands.push_back(
4464 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4465
4466 return ParseStatus::Success;
4467}
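// Worked example (illustrative, not part of the upstream source): for
// "{za0.s, za1.s}" each 32-bit tile is expanded to its 64-bit aliases
// (za0.s -> za0.d/za4.d, za1.s -> za1.d/za5.d), giving RegMask 0x33;
// "{za}" yields 0xFF and "{}" an empty mask.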
4468
4469template <RegKind VectorKind>
4470ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4471 bool ExpectMatch) {
4472 MCAsmParser &Parser = getParser();
4473 if (!getTok().is(AsmToken::LCurly))
4474 return ParseStatus::NoMatch;
4475
4476 // Wrapper around the vector-register parse function
4477 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4478 bool NoMatchIsError) -> ParseStatus {
4479 auto RegTok = getTok();
4480 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4481 if (ParseRes.isSuccess()) {
4482 if (parseVectorKind(Kind, VectorKind))
4483 return ParseRes;
4484 llvm_unreachable("Expected a valid vector kind");
4485 }
4486
4487 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4488 RegTok.getString().equals_insensitive("zt0"))
4489 return ParseStatus::NoMatch;
4490
4491 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4492 (ParseRes.isNoMatch() && NoMatchIsError &&
4493 !RegTok.getString().starts_with_insensitive("za")))
4494 return Error(Loc, "vector register expected");
4495
4496 return ParseStatus::NoMatch;
4497 };
4498
4499 int NumRegs = getNumRegsForRegKind(VectorKind);
4500 SMLoc S = getLoc();
4501 auto LCurly = getTok();
4502 Lex(); // Eat left bracket token.
4503
4504 StringRef Kind;
4505 MCRegister FirstReg;
4506 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4507
4508 // Put back the original left bracket if there was no match, so that
4509 // different types of list-operands can be matched (e.g. SVE, Neon).
4510 if (ParseRes.isNoMatch())
4511 Parser.getLexer().UnLex(LCurly);
4512
4513 if (!ParseRes.isSuccess())
4514 return ParseRes;
4515
4516 int64_t PrevReg = FirstReg;
4517 unsigned Count = 1;
4518
4519 int Stride = 1;
4520 if (parseOptionalToken(AsmToken::Minus)) {
4521 SMLoc Loc = getLoc();
4522 StringRef NextKind;
4523
4524 MCRegister Reg;
4525 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4526 if (!ParseRes.isSuccess())
4527 return ParseRes;
4528
4529 // Any kind suffixes must match on all regs in the list.
4530 if (Kind != NextKind)
4531 return Error(Loc, "mismatched register size suffix");
4532
4533 unsigned Space =
4534 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4535
4536 if (Space == 0 || Space > 3)
4537 return Error(Loc, "invalid number of vectors");
4538
4539 Count += Space;
4540 }
4541 else {
4542 bool HasCalculatedStride = false;
4543 while (parseOptionalToken(AsmToken::Comma)) {
4544 SMLoc Loc = getLoc();
4545 StringRef NextKind;
4546 MCRegister Reg;
4547 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4548 if (!ParseRes.isSuccess())
4549 return ParseRes;
4550
4551 // Any kind suffixes must match on all regs in the list.
4552 if (Kind != NextKind)
4553 return Error(Loc, "mismatched register size suffix");
4554
4555 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4556 unsigned PrevRegVal =
4557 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4558 if (!HasCalculatedStride) {
4559 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4560 : (RegVal + NumRegs - PrevRegVal);
4561 HasCalculatedStride = true;
4562 }
4563
4564 // Register must be incremental (with wraparound at the last register).
4565 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4566 return Error(Loc, "registers must have the same sequential stride");
4567
4568 PrevReg = Reg;
4569 ++Count;
4570 }
4571 }
4572
4573 if (parseToken(AsmToken::RCurly, "'}' expected"))
4574 return ParseStatus::Failure;
4575
4576 if (Count > 4)
4577 return Error(S, "invalid number of vectors");
4578
4579 unsigned NumElements = 0;
4580 unsigned ElementWidth = 0;
4581 if (!Kind.empty()) {
4582 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4583 std::tie(NumElements, ElementWidth) = *VK;
4584 }
4585
4586 Operands.push_back(AArch64Operand::CreateVectorList(
4587 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4588 getLoc(), getContext()));
4589
4590 return ParseStatus::Success;
4591}
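// Illustrative examples (not part of the upstream source): "{v0.8b-v3.8b}"
// parses as four consecutive registers, while the SME2 strided form
// "{z0.s, z4.s, z8.s, z12.s}" is accepted because every step repeats the
// stride set by the first pair (here 4, wrapping modulo NumRegs).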
4592
4593/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4594bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4595 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4596 if (!ParseRes.isSuccess())
4597 return true;
4598
4599 return tryParseVectorIndex(Operands).isFailure();
4600}
4601
4602ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4603 SMLoc StartLoc = getLoc();
4604
4605 MCRegister RegNum;
4606 ParseStatus Res = tryParseScalarRegister(RegNum);
4607 if (!Res.isSuccess())
4608 return Res;
4609
4610 if (!parseOptionalToken(AsmToken::Comma)) {
4611 Operands.push_back(AArch64Operand::CreateReg(
4612 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4613 return ParseStatus::Success;
4614 }
4615
4616 parseOptionalToken(AsmToken::Hash);
4617
4618 if (getTok().isNot(AsmToken::Integer))
4619 return Error(getLoc(), "index must be absent or #0");
4620
4621 const MCExpr *ImmVal;
4622 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4623 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4624 return Error(getLoc(), "index must be absent or #0");
4625
4626 Operands.push_back(AArch64Operand::CreateReg(
4627 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4628 return ParseStatus::Success;
4629}
4630
4631ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4632 SMLoc StartLoc = getLoc();
4633 const AsmToken &Tok = getTok();
4634 std::string Name = Tok.getString().lower();
4635
4636 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4637
4638 if (RegNum == 0)
4639 return ParseStatus::NoMatch;
4640
4641 Operands.push_back(AArch64Operand::CreateReg(
4642 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4643 Lex(); // Eat register.
4644
4645 // Check if the register is followed by an index
4646 if (parseOptionalToken(AsmToken::LBrac)) {
4647 Operands.push_back(
4648 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4649 const MCExpr *ImmVal;
4650 if (getParser().parseExpression(ImmVal))
4651 return ParseStatus::NoMatch;
4652 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4653 if (!MCE)
4654 return TokError("immediate value expected for vector index");
4655 Operands.push_back(AArch64Operand::CreateImm(
4656 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4657 getLoc(), getContext()));
4658 if (parseOptionalToken(AsmToken::Comma))
4659 if (parseOptionalMulOperand(Operands))
4660 return ParseStatus::Failure;
4661 if (parseToken(AsmToken::RBrac, "']' expected"))
4662 return ParseStatus::Failure;
4663 Operands.push_back(
4664 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4665 }
4666 return ParseStatus::Success;
4667}
4668
4669template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4670ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4671 SMLoc StartLoc = getLoc();
4672
4673 MCRegister RegNum;
4674 ParseStatus Res = tryParseScalarRegister(RegNum);
4675 if (!Res.isSuccess())
4676 return Res;
4677
4678 // No shift/extend is the default.
4679 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4680 Operands.push_back(AArch64Operand::CreateReg(
4681 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4682 return ParseStatus::Success;
4683 }
4684
4685 // Eat the comma
4686 Lex();
4687
4688 // Match the shift
4689 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4690 Res = tryParseOptionalShiftExtend(ExtOpnd);
4691 if (!Res.isSuccess())
4692 return Res;
4693
4694 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4695 Operands.push_back(AArch64Operand::CreateReg(
4696 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4697 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4698 Ext->hasShiftExtendAmount()));
4699
4700 return ParseStatus::Success;
4701}
4702
4703bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4704 MCAsmParser &Parser = getParser();
4705
4706 // Some SVE instructions have a decoration after the immediate, e.g.
4707 // "mul vl". We parse them here and add tokens, which must be present in the
4708 // asm string in the tablegen instruction.
4709 bool NextIsVL =
4710 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4711 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4712 if (!getTok().getString().equals_insensitive("mul") ||
4713 !(NextIsVL || NextIsHash))
4714 return true;
4715
4716 Operands.push_back(
4717 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4718 Lex(); // Eat the "mul"
4719
4720 if (NextIsVL) {
4721 Operands.push_back(
4722 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4723 Lex(); // Eat the "vl"
4724 return false;
4725 }
4726
4727 if (NextIsHash) {
4728 Lex(); // Eat the #
4729 SMLoc S = getLoc();
4730
4731 // Parse immediate operand.
4732 const MCExpr *ImmVal;
4733 if (!Parser.parseExpression(ImmVal))
4734 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4735 Operands.push_back(AArch64Operand::CreateImm(
4736 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4737 getContext()));
4738 return false;
4739 }
4740 }
4741
4742 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4743}
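// Illustrative examples (not part of the upstream source):
//   ldr z0, [x0, #1, mul vl]   // the "mul vl" decoration path
//   cntb x0, all, mul #4       // the "mul #<imm>" path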
4744
4745bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4746 StringRef &VecGroup) {
4747 MCAsmParser &Parser = getParser();
4748 auto Tok = Parser.getTok();
4749 if (Tok.isNot(AsmToken::Identifier))
4750 return true;
4751
4752 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4753 .Case("vgx2", "vgx2")
4754 .Case("vgx4", "vgx4")
4755 .Default("");
4756
4757 if (VG.empty())
4758 return true;
4759
4760 VecGroup = VG;
4761 Parser.Lex(); // Eat vgx[2|4]
4762 return false;
4763}
4764
4765bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4766 auto Tok = getTok();
4767 if (Tok.isNot(AsmToken::Identifier))
4768 return true;
4769
4770 auto Keyword = Tok.getString();
4771 Keyword = StringSwitch<StringRef>(Keyword.lower())
4772 .Case("sm", "sm")
4773 .Case("za", "za")
4774 .Default(Keyword);
4775 Operands.push_back(
4776 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4777
4778 Lex();
4779 return false;
4780}
4781
4782/// parseOperand - Parse an AArch64 instruction operand. For now this parses
4783/// the operand regardless of the mnemonic.
4784bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4785 bool invertCondCode) {
4786 MCAsmParser &Parser = getParser();
4787
4788 ParseStatus ResTy =
4789 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4790
4791 // Check if the current operand has a custom associated parser, if so, try to
4792 // custom parse the operand, or fallback to the general approach.
4793 if (ResTy.isSuccess())
4794 return false;
4795 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4796 // there was a match, but an error occurred, in which case, just return that
4797 // the operand parsing failed.
4798 if (ResTy.isFailure())
4799 return true;
4800
4801 // Nothing custom, so do general case parsing.
4802 SMLoc S, E;
4803 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4804 if (parseOptionalToken(AsmToken::Comma)) {
4805 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4806 if (!Res.isNoMatch())
4807 return Res.isFailure();
4808 getLexer().UnLex(SavedTok);
4809 }
4810 return false;
4811 };
4812 switch (getLexer().getKind()) {
4813 default: {
4814 SMLoc S = getLoc();
4815 const MCExpr *Expr;
4816 if (parseSymbolicImmVal(Expr))
4817 return Error(S, "invalid operand");
4818
4819 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4820 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4821 return parseOptionalShiftExtend(getTok());
4822 }
4823 case AsmToken::LBrac: {
4824 Operands.push_back(
4825 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4826 Lex(); // Eat '['
4827
4828 // There's no comma after a '[', so we can parse the next operand
4829 // immediately.
4830 return parseOperand(Operands, false, false);
4831 }
4832 case AsmToken::LCurly: {
4833 if (!parseNeonVectorList(Operands))
4834 return false;
4835
4836 Operands.push_back(
4837 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4838 Lex(); // Eat '{'
4839
4840 // There's no comma after a '{', so we can parse the next operand
4841 // immediately.
4842 return parseOperand(Operands, false, false);
4843 }
4844 case AsmToken::Identifier: {
4845 // See if this is a "VG" decoration used by SME instructions.
4846 StringRef VecGroup;
4847 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4848 Operands.push_back(
4849 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4850 return false;
4851 }
4852 // If we're expecting a Condition Code operand, then just parse that.
4853 if (isCondCode)
4854 return parseCondCode(Operands, invertCondCode);
4855
4856 // If it's a register name, parse it.
4857 if (!parseRegister(Operands)) {
4858 // Parse an optional shift/extend modifier.
4859 AsmToken SavedTok = getTok();
4860 if (parseOptionalToken(AsmToken::Comma)) {
4861 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4862 // such cases and don't report an error when <label> happens to match a
4863 // shift/extend modifier.
4864 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4865 /*ParseForAllFeatures=*/true);
4866 if (!Res.isNoMatch())
4867 return Res.isFailure();
4868 Res = tryParseOptionalShiftExtend(Operands);
4869 if (!Res.isNoMatch())
4870 return Res.isFailure();
4871 getLexer().UnLex(SavedTok);
4872 }
4873 return false;
4874 }
4875
4876 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4877 // by SVE instructions.
4878 if (!parseOptionalMulOperand(Operands))
4879 return false;
4880
4881 // If this is a two-word mnemonic, parse its special keyword
4882 // operand as an identifier.
4883 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4884 Mnemonic == "gcsb")
4885 return parseKeywordOperand(Operands);
4886
4887 // This was not a register so parse other operands that start with an
4888 // identifier (like labels) as expressions and create them as immediates.
4889 const MCExpr *IdVal;
4890 S = getLoc();
4891 if (getParser().parseExpression(IdVal))
4892 return true;
4893 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4894 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4895 return false;
4896 }
4897 case AsmToken::Integer:
4898 case AsmToken::Real:
4899 case AsmToken::Hash: {
4900 // #42 -> immediate.
4901 S = getLoc();
4902
4903 parseOptionalToken(AsmToken::Hash);
4904
4905 // Parse a negative sign
4906 bool isNegative = false;
4907 if (getTok().is(AsmToken::Minus)) {
4908 isNegative = true;
4909 // We need to consume this token only when we have a Real, otherwise
4910 // we let parseSymbolicImmVal take care of it.
4911 if (Parser.getLexer().peekTok().is(AsmToken::Rea