//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<bool, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
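
  // For example (illustrative): in the prefixed pair
  //   movprfx z0.s, p0/m, z1.s
  //   add     z0.s, p0/m, z0.s, z2.s
  // NextPrefix records the destination (z0), predicate (p0) and element size
  // (.s) of the movprfx so the following destructive instruction can be
  // checked against them.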

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(MCRegister &Reg);
  OperandMatchResultTy tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseRPRFMOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  OperandMatchResultTy tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  OperandMatchResultTy tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                     SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
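
  // For example (illustrative): in "ldr x0, [x1, w2, sxtw #3]", the
  // "sxtw #3" is parsed into the w2 register operand's ShiftExtend field
  // above rather than as a separate operand, so the matcher can check the
  // #3 against the 8-byte scaling implied by the 64-bit load.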

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around, as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
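
  // Worked example (illustrative): for Bits = 4, Scale = 16 and Signed = true,
  // MinVal = (1 << 3) * -16 = -128 and MaxVal = ((1 << 3) - 1) * 16 = 112, so
  // the accepted values are the multiples of 16 in [-128, 112].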

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
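
  // For example (illustrative): with Scale = 8 (as for "ldr x0, [x1, #imm]"),
  // the legal immediates are the multiples of 8 from 0 to 0xfff * 8 = 32760.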

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
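
  // Worked example (illustrative): for T = int32_t the two 16-bit shifts
  // build Upper = 0xFFFFFFFF00000000 without ever shifting by 64, which would
  // be undefined behaviour in the T = int64_t case (there, two 32-bit shifts
  // yield Upper = 0, i.e. no top-bits constraint). A value such as
  // 0xFFFFFFFF80000001 (the sign-extension of 0x80000001) passes the top-bits
  // check and is then tested as the 32-bit immediate 0x80000001.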

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
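
  // For example (illustrative), with Width = 12: the plain immediate 0x3000
  // becomes (0x3, 12), 0x5 becomes (0x5, 0), and 0x1001 becomes (0x1001, 0)
  // because its low 12 bits are not all zero.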

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
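
  // For example (illustrative): "add x0, x1, #0x123000" is accepted because
  // getShiftedVal<12>() folds the constant to (0x123, 12), which encodes as
  // "#0x123, lsl #12"; "#0x1001" is rejected since it folds to (0x1001, 0)
  // and 0x1001 > 0xfff.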

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For an element width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed or unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
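
  // For example (illustrative): with .h elements, "add z0.h, z0.h, #512"
  // matches because 512 folds to (2, lsl #8) and is a multiple of 256,
  // whereas "#257" only folds to (257, lsl #0) and is rejected.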

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
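
  // Worked example (illustrative): for N = 26 (B/BL) the accepted range is
  // -(1<<25)<<2 = -128 MiB to ((1<<25)-1)<<2 = 128 MiB - 4; for N = 19
  // (B.cond) it is roughly +/-1 MiB, always in multiples of 4 bytes.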

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
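
  // For example (illustrative): "mov x0, #0x10000" matches the MOVZ alias
  // with Shift = 16 ("movz x0, #1, lsl #16"), and "mov x0, #-2" matches the
  // MOVN alias ("movn x0, #1").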

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }

  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
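
  // For example (illustrative): FCMLA uses <90, 0>, accepting 0, 90, 180 and
  // 270, while FCADD uses <180, 90>, accepting only 90 and 270.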

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
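
  // For example (illustrative): for Width = 32, "ldr w0, [x1, w2, uxtw #2]"
  // matches isMemWExtend<32> because the amount equals Log2_32(32 / 8) = 2;
  // an explicit "#1" there would not.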

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
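
  // For example (illustrative): for Width = 64, "ldr x0, [x1, #1]" takes this
  // path (1 is not a multiple of 8) and is emitted as LDUR, while "#8" is a
  // legal scaled offset and stays LDR.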

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
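
  // Worked out (illustrative): ADRP has a 21-bit signed page delta, so the
  // range is -4096 * 2^20 = -4 GiB to 4096 * (2^20 - 1) = 4 GiB - 4 KiB,
  // always in whole 4 KiB pages.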

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
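
  // For example (illustrative): with Width = 64 and getReg() == AArch64::D3,
  // this emits Z3. The arithmetic assumes the tblgen-generated register enums
  // number B0-B31, H0-H31, ..., Z0-Z31 consecutively.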

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           "NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           "NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
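
  // For example (illustrative): for RegTy = VecListIdx_QReg and NumRegs = 2,
  // a list starting at Q3 yields Q0_Q1 + (Q3 - Q0), i.e. the tuple register
  // Q3_Q4. Column 0 holds the base register used to rebias
  // getVectorListStart() (Q0 even for the D-register row, since the list
  // start is recorded as a Q register).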

  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && "NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }

  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1846
1847 void addImmOperands(MCInst &Inst, unsigned N) const {
1848 assert(N == 1 && "Invalid number of operands!");
1849 // If this is a pageoff symrefexpr with an addend, adjust the addend
1850 // to be only the page-offset portion. Otherwise, just add the expr
1851 // as-is.
1852 addExpr(Inst, getImm());
1853 }
1854
1855 template <int Shift>
1856 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1857 assert(N == 2 && "Invalid number of operands!");
1858 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1859 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1860 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1861 } else if (isShiftedImm()) {
1862 addExpr(Inst, getShiftedImmVal());
1863 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1864 } else {
1865 addExpr(Inst, getImm());
1866 Inst.addOperand(MCOperand::createImm(0));
1867 }
1868 }
1869
1870 template <int Shift>
1871 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1872 assert(N == 2 && "Invalid number of operands!");
1873 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1874 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1875 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1876 } else
1877 llvm_unreachable("Not a shifted negative immediate");
1878 }
1879
1880 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1881 assert(N == 1 && "Invalid number of operands!");
1882 Inst.addOperand(MCOperand::createImm(getCondCode()));
1883 }
1884
1885 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1886 assert(N == 1 && "Invalid number of operands!");
1887 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1888 if (!MCE)
1889 addExpr(Inst, getImm());
1890 else
1891 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1892 }
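// Editor's note (illustrative): ADRP materializes a 4 KiB page address, so a
// fully resolved constant is scaled to a page index here, e.g. an immediate
// of 0x12345 is emitted as 0x12345 >> 12 = 0x12; symbolic references are
// added unscaled and left to the fixup/relocation machinery.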
1893
1894 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1895 addImmOperands(Inst, N);
1896 }
1897
1898 template<int Scale>
1899 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1900 assert(N == 1 && "Invalid number of operands!");
1901 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1902
1903 if (!MCE) {
1904 Inst.addOperand(MCOperand::createExpr(getImm()));
1905 return;
1906 }
1907 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1908 }
1909
1910 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1911 assert(N == 1 && "Invalid number of operands!");
1912 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1913 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1914 }
1915
1916 template <int Scale>
1917 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1918 assert(N == 1 && "Invalid number of operands!");
1919 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1920 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1921 }
1922
1923 template <int Scale>
1924 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1925 assert(N == 1 && "Invalid number of operands!");
1926 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1927 }
1928
1929 template <typename T>
1930 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1931 assert(N == 1 && "Invalid number of operands!");
1932 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1933 std::make_unsigned_t<T> Val = MCE->getValue();
1934 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1935 Inst.addOperand(MCOperand::createImm(encoding));
1936 }
1937
1938 template <typename T>
1939 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1940 assert(N == 1 && "Invalid number of operands!");
1941 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1942 std::make_unsigned_t<T> Val = ~MCE->getValue();
1943 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1944 Inst.addOperand(MCOperand::createImm(encoding));
1945 }
1946
1947 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1948 assert(N == 1 && "Invalid number of operands!");
1949 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1950 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1951 Inst.addOperand(MCOperand::createImm(encoding));
1952 }
1953
1954 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1955 // Branch operands don't encode the low bits, so shift them off
1956 // here. If it's a label, however, just put it on directly as there's
1957 // not enough information now to do anything.
1958 assert(N == 1 && "Invalid number of operands!");
1959 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1960 if (!MCE) {
1961 addExpr(Inst, getImm());
1962 return;
1963 }
1964 assert(MCE && "Invalid constant immediate operand!");
1965 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1966 }
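// Editor's note (illustrative): because AArch64 instructions are 4-byte
// aligned, a resolved branch displacement of 0x1000 bytes is encoded above
// as 0x1000 >> 2 = 0x400 instruction units; the two low bits are implied.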
1967
1968 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1969 // Branch operands don't encode the low bits, so shift them off
1970 // here. If it's a label, however, just put it on directly as there's
1971 // not enough information now to do anything.
1972 assert(N == 1 && "Invalid number of operands!");
1973 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1974 if (!MCE) {
1975 addExpr(Inst, getImm());
1976 return;
1977 }
1978 assert(MCE && "Invalid constant immediate operand!");
1979 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1980 }
1981
1982 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1983 // Branch operands don't encode the low bits, so shift them off
1984 // here. If it's a label, however, just put it on directly as there's
1985 // not enough information now to do anything.
1986 assert(N == 1 && "Invalid number of operands!");
1987 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1988 if (!MCE) {
1989 addExpr(Inst, getImm());
1990 return;
1991 }
1992 assert(MCE && "Invalid constant immediate operand!");
1993 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1994 }
1995
1996 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1997 assert(N == 1 && "Invalid number of operands!");
1998 Inst.addOperand(MCOperand::createImm(
1999 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2000 }
2001
2002 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2003 assert(N == 1 && "Invalid number of operands!");
2004 Inst.addOperand(MCOperand::createImm(getBarrier()));
2005 }
2006
2007 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2008 assert(N == 1 && "Invalid number of operands!");
2009 Inst.addOperand(MCOperand::createImm(getBarrier()));
2010 }
2011
2012 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2013 assert(N == 1 && "Invalid number of operands!");
2014
2015 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2016 }
2017
2018 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2019 assert(N == 1 && "Invalid number of operands!");
2020
2021 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2022 }
2023
2024 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2025 assert(N == 1 && "Invalid number of operands!");
2026
2027 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2028 }
2029
2030 void addSVCROperands(MCInst &Inst, unsigned N) const {
2031 assert(N == 1 && "Invalid number of operands!");
2032
2033 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2034 }
2035
2036 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038
2039 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2040 }
2041
2042 void addSysCROperands(MCInst &Inst, unsigned N) const {
2043 assert(N == 1 && "Invalid number of operands!");
2044 Inst.addOperand(MCOperand::createImm(getSysCR()));
2045 }
2046
2047 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2048 assert(N == 1 && "Invalid number of operands!");
2049 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2050 }
2051
2052 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2053 assert(N == 1 && "Invalid number of operands!");
2054 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2055 }
2056
2057 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2058 assert(N == 1 && "Invalid number of operands!");
2059 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2060 }
2061
2062 void addShifterOperands(MCInst &Inst, unsigned N) const {
2063 assert(N == 1 && "Invalid number of operands!");
2064 unsigned Imm =
2065 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2066 Inst.addOperand(MCOperand::createImm(Imm));
2067 }
2068
2069 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2070 assert(N == 1 && "Invalid number of operands!");
2071
2072 if (!isScalarReg())
2073 return;
2074
2075 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2076 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2077 .getRegister(RI->getEncodingValue(getReg()));
2078 if (Reg != AArch64::XZR)
2079 llvm_unreachable("wrong register");
2080
2081 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2082 }
2083
2084 void addExtendOperands(MCInst &Inst, unsigned N) const {
2085 assert(N == 1 && "Invalid number of operands!");
2086 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2087 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2088 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2089 Inst.addOperand(MCOperand::createImm(Imm));
2090 }
2091
2092 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2093 assert(N == 1 && "Invalid number of operands!");
2094 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2095 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2096 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2097 Inst.addOperand(MCOperand::createImm(Imm));
2098 }
2099
2100 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2101 assert(N == 2 && "Invalid number of operands!");
2102 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2103 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2104 Inst.addOperand(MCOperand::createImm(IsSigned));
2105 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2106 }
2107
2108 // For 8-bit load/store instructions with a register offset, both the
2109 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2110 // they're disambiguated by whether the shift was explicit or implicit rather
2111 // than its size.
2112 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2113 assert(N == 2 && "Invalid number of operands!");
2114 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2115 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2116 Inst.addOperand(MCOperand::createImm(IsSigned));
2117 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2118 }
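// Editor's worked example (illustrative): "ldrb w0, [x1, x2]" parses with no
// explicit amount, so hasShiftExtendAmount() is false and the "NoShift"
// variant matches, whereas "ldrb w0, [x1, x2, lsl #0]" carries an explicit
// (still zero) amount and selects the "DoShift" variant.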
2119
2120 template<int Shift>
2121 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2122 assert(N == 1 && "Invalid number of operands!");
2123
2124 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2125 if (CE) {
2126 uint64_t Value = CE->getValue();
2127 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2128 } else {
2129 addExpr(Inst, getImm());
2130 }
2131 }
2132
2133 template<int Shift>
2134 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2135 assert(N == 1 && "Invalid number of operands!");
2136
2137 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2138 uint64_t Value = CE->getValue();
2139 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2140 }
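// Editor's worked example (illustrative): the MOVN alias stores the
// bitwise-NOT of the requested value, so "mov x0, #-2" yields the 16-bit
// chunk (~0xfffffffffffffffe >> 0) & 0xffff = 0x0001 for the MOVN encoding.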
2141
2142 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2143 assert(N == 1 && "Invalid number of operands!");
2144 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2145 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2146 }
2147
2148 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2149 assert(N == 1 && "Invalid number of operands!");
2150 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2151 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2152 }
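// Editor's note (illustrative): FCMLA accepts rotations 0/90/180/270 and the
// even helper encodes value/90 (e.g. 270 -> 3); FCADD accepts only 90/270
// and the odd helper encodes (value - 90)/180 (90 -> 0, 270 -> 1).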
2153
2154 void print(raw_ostream &OS) const override;
2155
2156 static std::unique_ptr<AArch64Operand>
2157 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2158 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2159 Op->Tok.Data = Str.data();
2160 Op->Tok.Length = Str.size();
2161 Op->Tok.IsSuffix = IsSuffix;
2162 Op->StartLoc = S;
2163 Op->EndLoc = S;
2164 return Op;
2165 }
2166
2167 static std::unique_ptr<AArch64Operand>
2168 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2169 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2170 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2171 unsigned ShiftAmount = 0,
2172 unsigned HasExplicitAmount = false) {
2173 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2174 Op->Reg.RegNum = RegNum;
2175 Op->Reg.Kind = Kind;
2176 Op->Reg.ElementWidth = 0;
2177 Op->Reg.EqualityTy = EqTy;
2178 Op->Reg.ShiftExtend.Type = ExtTy;
2179 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2180 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2181 Op->StartLoc = S;
2182 Op->EndLoc = E;
2183 return Op;
2184 }
2185
2186 static std::unique_ptr<AArch64Operand>
2187 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2188 SMLoc S, SMLoc E, MCContext &Ctx,
2189 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2190 unsigned ShiftAmount = 0,
2191 unsigned HasExplicitAmount = false) {
2192 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2193 Kind == RegKind::SVEPredicateVector ||
2194 Kind == RegKind::SVEPredicateAsCounter) &&
2195 "Invalid vector kind");
2196 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2197 HasExplicitAmount);
2198 Op->Reg.ElementWidth = ElementWidth;
2199 return Op;
2200 }
2201
2202 static std::unique_ptr<AArch64Operand>
2203 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2204 unsigned NumElements, unsigned ElementWidth,
2205 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2206 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2207 Op->VectorList.RegNum = RegNum;
2208 Op->VectorList.Count = Count;
2209 Op->VectorList.Stride = Stride;
2210 Op->VectorList.NumElements = NumElements;
2211 Op->VectorList.ElementWidth = ElementWidth;
2212 Op->VectorList.RegisterKind = RegisterKind;
2213 Op->StartLoc = S;
2214 Op->EndLoc = E;
2215 return Op;
2216 }
2217
2218 static std::unique_ptr<AArch64Operand>
2219 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2220 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2221 Op->VectorIndex.Val = Idx;
2222 Op->StartLoc = S;
2223 Op->EndLoc = E;
2224 return Op;
2225 }
2226
2227 static std::unique_ptr<AArch64Operand>
2228 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2229 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2230 Op->MatrixTileList.RegMask = RegMask;
2231 Op->StartLoc = S;
2232 Op->EndLoc = E;
2233 return Op;
2234 }
2235
2236 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2237 const unsigned ElementWidth) {
2238 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2239 RegMap = {
2240 {{0, AArch64::ZAB0},
2241 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2242 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2243 {{8, AArch64::ZAB0},
2244 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2245 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2246 {{16, AArch64::ZAH0},
2247 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2248 {{16, AArch64::ZAH1},
2249 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2250 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2251 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2252 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2253 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2254 };
2255
2256 if (ElementWidth == 64)
2257 OutRegs.insert(Reg);
2258 else {
2259 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2260 assert(!Regs.empty() && "Invalid tile or element width!");
2261 for (auto OutReg : Regs)
2262 OutRegs.insert(OutReg);
2263 }
2264 }
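// Editor's worked example (illustrative): a 32-bit tile aliases two 64-bit
// tiles, so ComputeRegsForAlias(AArch64::ZAS1, Out, 32) inserts ZAD1 and
// ZAD5 via the map above, while ElementWidth == 64 inserts the tile itself.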
2265
2266 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2267 SMLoc E, MCContext &Ctx) {
2268 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2269 Op->Imm.Val = Val;
2270 Op->StartLoc = S;
2271 Op->EndLoc = E;
2272 return Op;
2273 }
2274
2275 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2276 unsigned ShiftAmount,
2277 SMLoc S, SMLoc E,
2278 MCContext &Ctx) {
2279 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2280 Op->ShiftedImm.Val = Val;
2281 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2282 Op->StartLoc = S;
2283 Op->EndLoc = E;
2284 return Op;
2285 }
2286
2287 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2288 unsigned Last, SMLoc S,
2289 SMLoc E,
2290 MCContext &Ctx) {
2291 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2292 Op->ImmRange.First = First;
2293 Op->ImmRange.Last = Last;
Op->StartLoc = S;
2294 Op->EndLoc = E;
2295 return Op;
2296 }
2297
2298 static std::unique_ptr<AArch64Operand>
2299 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2300 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2301 Op->CondCode.Code = Code;
2302 Op->StartLoc = S;
2303 Op->EndLoc = E;
2304 return Op;
2305 }
2306
2307 static std::unique_ptr<AArch64Operand>
2308 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2309 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2310 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2311 Op->FPImm.IsExact = IsExact;
2312 Op->StartLoc = S;
2313 Op->EndLoc = S;
2314 return Op;
2315 }
2316
2317 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2318 StringRef Str,
2319 SMLoc S,
2320 MCContext &Ctx,
2321 bool HasnXSModifier) {
2322 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2323 Op->Barrier.Val = Val;
2324 Op->Barrier.Data = Str.data();
2325 Op->Barrier.Length = Str.size();
2326 Op->Barrier.HasnXSModifier = HasnXSModifier;
2327 Op->StartLoc = S;
2328 Op->EndLoc = S;
2329 return Op;
2330 }
2331
2332 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2333 uint32_t MRSReg,
2334 uint32_t MSRReg,
2335 uint32_t PStateField,
2336 MCContext &Ctx) {
2337 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2338 Op->SysReg.Data = Str.data();
2339 Op->SysReg.Length = Str.size();
2340 Op->SysReg.MRSReg = MRSReg;
2341 Op->SysReg.MSRReg = MSRReg;
2342 Op->SysReg.PStateField = PStateField;
2343 Op->StartLoc = S;
2344 Op->EndLoc = S;
2345 return Op;
2346 }
2347
2348 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2349 SMLoc E, MCContext &Ctx) {
2350 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2351 Op->SysCRImm.Val = Val;
2352 Op->StartLoc = S;
2353 Op->EndLoc = E;
2354 return Op;
2355 }
2356
2357 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2358 StringRef Str,
2359 SMLoc S,
2360 MCContext &Ctx) {
2361 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2362 Op->Prefetch.Val = Val;
2363 Op->Barrier.Data = Str.data();
2364 Op->Barrier.Length = Str.size();
2365 Op->StartLoc = S;
2366 Op->EndLoc = S;
2367 return Op;
2368 }
2369
2370 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2371 StringRef Str,
2372 SMLoc S,
2373 MCContext &Ctx) {
2374 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2375 Op->PSBHint.Val = Val;
2376 Op->PSBHint.Data = Str.data();
2377 Op->PSBHint.Length = Str.size();
2378 Op->StartLoc = S;
2379 Op->EndLoc = S;
2380 return Op;
2381 }
2382
2383 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2384 StringRef Str,
2385 SMLoc S,
2386 MCContext &Ctx) {
2387 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2388 Op->BTIHint.Val = Val | 32;
2389 Op->BTIHint.Data = Str.data();
2390 Op->BTIHint.Length = Str.size();
2391 Op->StartLoc = S;
2392 Op->EndLoc = S;
2393 return Op;
2394 }
2395
2396 static std::unique_ptr<AArch64Operand>
2397 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2398 SMLoc S, SMLoc E, MCContext &Ctx) {
2399 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2400 Op->MatrixReg.RegNum = RegNum;
2401 Op->MatrixReg.ElementWidth = ElementWidth;
2402 Op->MatrixReg.Kind = Kind;
2403 Op->StartLoc = S;
2404 Op->EndLoc = E;
2405 return Op;
2406 }
2407
2408 static std::unique_ptr<AArch64Operand>
2409 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2410 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2411 Op->SVCR.PStateField = PStateField;
2412 Op->SVCR.Data = Str.data();
2413 Op->SVCR.Length = Str.size();
2414 Op->StartLoc = S;
2415 Op->EndLoc = S;
2416 return Op;
2417 }
2418
2419 static std::unique_ptr<AArch64Operand>
2420 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2421 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2422 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2423 Op->ShiftExtend.Type = ShOp;
2424 Op->ShiftExtend.Amount = Val;
2425 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2426 Op->StartLoc = S;
2427 Op->EndLoc = E;
2428 return Op;
2429 }
2430};
2431
2432} // end anonymous namespace.
2433
2434void AArch64Operand::print(raw_ostream &OS) const {
2435 switch (Kind) {
2436 case k_FPImm:
2437 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2438 if (!getFPImmIsExact())
2439 OS << " (inexact)";
2440 OS << ">";
2441 break;
2442 case k_Barrier: {
2443 StringRef Name = getBarrierName();
2444 if (!Name.empty())
2445 OS << "<barrier " << Name << ">";
2446 else
2447 OS << "<barrier invalid #" << getBarrier() << ">";
2448 break;
2449 }
2450 case k_Immediate:
2451 OS << *getImm();
2452 break;
2453 case k_ShiftedImm: {
2454 unsigned Shift = getShiftedImmShift();
2455 OS << "<shiftedimm ";
2456 OS << *getShiftedImmVal();
2457 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2458 break;
2459 }
2460 case k_ImmRange: {
2461 OS << "<immrange ";
2462 OS << getFirstImmVal();
2463 OS << ":" << getLastImmVal() << ">";
2464 break;
2465 }
2466 case k_CondCode:
2467 OS << "<condcode " << getCondCode() << ">";
2468 break;
2469 case k_VectorList: {
2470 OS << "<vectorlist ";
2471 unsigned Reg = getVectorListStart();
2472 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2473 OS << Reg + i * getVectorListStride() << " ";
2474 OS << ">";
2475 break;
2476 }
2477 case k_VectorIndex:
2478 OS << "<vectorindex " << getVectorIndex() << ">";
2479 break;
2480 case k_SysReg:
2481 OS << "<sysreg: " << getSysReg() << '>';
2482 break;
2483 case k_Token:
2484 OS << "'" << getToken() << "'";
2485 break;
2486 case k_SysCR:
2487 OS << "c" << getSysCR();
2488 break;
2489 case k_Prefetch: {
2490 StringRef Name = getPrefetchName();
2491 if (!Name.empty())
2492 OS << "<prfop " << Name << ">";
2493 else
2494 OS << "<prfop invalid #" << getPrefetch() << ">";
2495 break;
2496 }
2497 case k_PSBHint:
2498 OS << getPSBHintName();
2499 break;
2500 case k_BTIHint:
2501 OS << getBTIHintName();
2502 break;
2503 case k_MatrixRegister:
2504 OS << "<matrix " << getMatrixReg() << ">";
2505 break;
2506 case k_MatrixTileList: {
2507 OS << "<matrixlist ";
2508 unsigned RegMask = getMatrixTileListRegMask();
2509 unsigned MaxBits = 8;
2510 for (unsigned I = MaxBits; I > 0; --I)
2511 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2512 OS << '>';
2513 break;
2514 }
2515 case k_SVCR: {
2516 OS << getSVCR();
2517 break;
2518 }
2519 case k_Register:
2520 OS << "<register " << getReg() << ">";
2521 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2522 break;
2523 [[fallthrough]];
2524 case k_ShiftExtend:
2525 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2526 << getShiftExtendAmount();
2527 if (!hasShiftExtendAmount())
2528 OS << "<imp>";
2529 OS << '>';
2530 break;
2531 }
2532}
2533
2534/// @name Auto-generated Match Functions
2535/// {
2536
2537static unsigned MatchRegisterName(StringRef Name);
2538
2539/// }
2540
2541static unsigned MatchNeonVectorRegName(StringRef Name) {
2542 return StringSwitch<unsigned>(Name.lower())
2543 .Case("v0", AArch64::Q0)
2544 .Case("v1", AArch64::Q1)
2545 .Case("v2", AArch64::Q2)
2546 .Case("v3", AArch64::Q3)
2547 .Case("v4", AArch64::Q4)
2548 .Case("v5", AArch64::Q5)
2549 .Case("v6", AArch64::Q6)
2550 .Case("v7", AArch64::Q7)
2551 .Case("v8", AArch64::Q8)
2552 .Case("v9", AArch64::Q9)
2553 .Case("v10", AArch64::Q10)
2554 .Case("v11", AArch64::Q11)
2555 .Case("v12", AArch64::Q12)
2556 .Case("v13", AArch64::Q13)
2557 .Case("v14", AArch64::Q14)
2558 .Case("v15", AArch64::Q15)
2559 .Case("v16", AArch64::Q16)
2560 .Case("v17", AArch64::Q17)
2561 .Case("v18", AArch64::Q18)
2562 .Case("v19", AArch64::Q19)
2563 .Case("v20", AArch64::Q20)
2564 .Case("v21", AArch64::Q21)
2565 .Case("v22", AArch64::Q22)
2566 .Case("v23", AArch64::Q23)
2567 .Case("v24", AArch64::Q24)
2568 .Case("v25", AArch64::Q25)
2569 .Case("v26", AArch64::Q26)
2570 .Case("v27", AArch64::Q27)
2571 .Case("v28", AArch64::Q28)
2572 .Case("v29", AArch64::Q29)
2573 .Case("v30", AArch64::Q30)
2574 .Case("v31", AArch64::Q31)
2575 .Default(0);
2576}
2577
2578/// Returns an optional pair of (#elements, element-width) if Suffix
2579/// is a valid vector kind. Where the number of elements in a vector
2580/// or the vector width is implicit or explicitly unknown (but still a
2581/// valid suffix kind), 0 is used.
2582static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2583 RegKind VectorKind) {
2584 std::pair<int, int> Res = {-1, -1};
2585
2586 switch (VectorKind) {
2587 case RegKind::NeonVector:
2588 Res =
2589 StringSwitch<std::pair<int, int>>(Suffix.lower())
2590 .Case("", {0, 0})
2591 .Case(".1d", {1, 64})
2592 .Case(".1q", {1, 128})
2593 // '.2h' needed for fp16 scalar pairwise reductions
2594 .Case(".2h", {2, 16})
2595 .Case(".2s", {2, 32})
2596 .Case(".2d", {2, 64})
2597 // '.4b' is another special case for the ARMv8.2a dot product
2598 // operand
2599 .Case(".4b", {4, 8})
2600 .Case(".4h", {4, 16})
2601 .Case(".4s", {4, 32})
2602 .Case(".8b", {8, 8})
2603 .Case(".8h", {8, 16})
2604 .Case(".16b", {16, 8})
2605 // Accept the width neutral ones, too, for verbose syntax. If those
2606 // aren't used in the right places, the token operand won't match so
2607 // all will work out.
2608 .Case(".b", {0, 8})
2609 .Case(".h", {0, 16})
2610 .Case(".s", {0, 32})
2611 .Case(".d", {0, 64})
2612 .Default({-1, -1});
2613 break;
2614 case RegKind::SVEPredicateAsCounter:
2615 case RegKind::SVEPredicateVector:
2616 case RegKind::SVEDataVector:
2617 case RegKind::Matrix:
2618 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2619 .Case("", {0, 0})
2620 .Case(".b", {0, 8})
2621 .Case(".h", {0, 16})
2622 .Case(".s", {0, 32})
2623 .Case(".d", {0, 64})
2624 .Case(".q", {0, 128})
2625 .Default({-1, -1});
2626 break;
2627 default:
2628 llvm_unreachable("Unsupported RegKind");
2629 }
2630
2631 if (Res == std::make_pair(-1, -1))
2632 return std::nullopt;
2633
2634 return std::optional<std::pair<int, int>>(Res);
2635}
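// Editor's usage note (illustrative): parseVectorKind(".4s",
// RegKind::NeonVector) returns {4, 32}; the width-neutral suffix ".b"
// returns {0, 8}; an invalid suffix such as ".3s" returns std::nullopt,
// which isValidVectorKind() below maps to false.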
2636
2637static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2638 return parseVectorKind(Suffix, VectorKind).has_value();
2639}
2640
2641static unsigned matchSVEDataVectorRegName(StringRef Name) {
2642 return StringSwitch<unsigned>(Name.lower())
2643 .Case("z0", AArch64::Z0)
2644 .Case("z1", AArch64::Z1)
2645 .Case("z2", AArch64::Z2)
2646 .Case("z3", AArch64::Z3)
2647 .Case("z4", AArch64::Z4)
2648 .Case("z5", AArch64::Z5)
2649 .Case("z6", AArch64::Z6)
2650 .Case("z7", AArch64::Z7)
2651 .Case("z8", AArch64::Z8)
2652 .Case("z9", AArch64::Z9)
2653 .Case("z10", AArch64::Z10)
2654 .Case("z11", AArch64::Z11)
2655 .Case("z12", AArch64::Z12)
2656 .Case("z13", AArch64::Z13)
2657 .Case("z14", AArch64::Z14)
2658 .Case("z15", AArch64::Z15)
2659 .Case("z16", AArch64::Z16)
2660 .Case("z17", AArch64::Z17)
2661 .Case("z18", AArch64::Z18)
2662 .Case("z19", AArch64::Z19)
2663 .Case("z20", AArch64::Z20)
2664 .Case("z21", AArch64::Z21)
2665 .Case("z22", AArch64::Z22)
2666 .Case("z23", AArch64::Z23)
2667 .Case("z24", AArch64::Z24)
2668 .Case("z25", AArch64::Z25)
2669 .Case("z26", AArch64::Z26)
2670 .Case("z27", AArch64::Z27)
2671 .Case("z28", AArch64::Z28)
2672 .Case("z29", AArch64::Z29)
2673 .Case("z30", AArch64::Z30)
2674 .Case("z31", AArch64::Z31)
2675 .Default(0);
2676}
2677
2678static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2679 return StringSwitch<unsigned>(Name.lower())
2680 .Case("p0", AArch64::P0)
2681 .Case("p1", AArch64::P1)
2682 .Case("p2", AArch64::P2)
2683 .Case("p3", AArch64::P3)
2684 .Case("p4", AArch64::P4)
2685 .Case("p5", AArch64::P5)
2686 .Case("p6", AArch64::P6)
2687 .Case("p7", AArch64::P7)
2688 .Case("p8", AArch64::P8)
2689 .Case("p9", AArch64::P9)
2690 .Case("p10", AArch64::P10)
2691 .Case("p11", AArch64::P11)
2692 .Case("p12", AArch64::P12)
2693 .Case("p13", AArch64::P13)
2694 .Case("p14", AArch64::P14)
2695 .Case("p15", AArch64::P15)
2696 .Default(0);
2697}
2698
2699static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2700 return StringSwitch<unsigned>(Name.lower())
2701 .Case("pn0", AArch64::P0)
2702 .Case("pn1", AArch64::P1)
2703 .Case("pn2", AArch64::P2)
2704 .Case("pn3", AArch64::P3)
2705 .Case("pn4", AArch64::P4)
2706 .Case("pn5", AArch64::P5)
2707 .Case("pn6", AArch64::P6)
2708 .Case("pn7", AArch64::P7)
2709 .Case("pn8", AArch64::P8)
2710 .Case("pn9", AArch64::P9)
2711 .Case("pn10", AArch64::P10)
2712 .Case("pn11", AArch64::P11)
2713 .Case("pn12", AArch64::P12)
2714 .Case("pn13", AArch64::P13)
2715 .Case("pn14", AArch64::P14)
2716 .Case("pn15", AArch64::P15)
2717 .Default(0);
2718}
2719
2720static unsigned matchMatrixTileListRegName(StringRef Name) {
2721 return StringSwitch<unsigned>(Name.lower())
2722 .Case("za0.d", AArch64::ZAD0)
2723 .Case("za1.d", AArch64::ZAD1)
2724 .Case("za2.d", AArch64::ZAD2)
2725 .Case("za3.d", AArch64::ZAD3)
2726 .Case("za4.d", AArch64::ZAD4)
2727 .Case("za5.d", AArch64::ZAD5)
2728 .Case("za6.d", AArch64::ZAD6)
2729 .Case("za7.d", AArch64::ZAD7)
2730 .Case("za0.s", AArch64::ZAS0)
2731 .Case("za1.s", AArch64::ZAS1)
2732 .Case("za2.s", AArch64::ZAS2)
2733 .Case("za3.s", AArch64::ZAS3)
2734 .Case("za0.h", AArch64::ZAH0)
2735 .Case("za1.h", AArch64::ZAH1)
2736 .Case("za0.b", AArch64::ZAB0)
2737 .Default(0);
2738}
2739
2740static unsigned matchMatrixRegName(StringRef Name) {
2741 return StringSwitch<unsigned>(Name.lower())
2742 .Case("za", AArch64::ZA)
2743 .Case("za0.q", AArch64::ZAQ0)
2744 .Case("za1.q", AArch64::ZAQ1)
2745 .Case("za2.q", AArch64::ZAQ2)
2746 .Case("za3.q", AArch64::ZAQ3)
2747 .Case("za4.q", AArch64::ZAQ4)
2748 .Case("za5.q", AArch64::ZAQ5)
2749 .Case("za6.q", AArch64::ZAQ6)
2750 .Case("za7.q", AArch64::ZAQ7)
2751 .Case("za8.q", AArch64::ZAQ8)
2752 .Case("za9.q", AArch64::ZAQ9)
2753 .Case("za10.q", AArch64::ZAQ10)
2754 .Case("za11.q", AArch64::ZAQ11)
2755 .Case("za12.q", AArch64::ZAQ12)
2756 .Case("za13.q", AArch64::ZAQ13)
2757 .Case("za14.q", AArch64::ZAQ14)
2758 .Case("za15.q", AArch64::ZAQ15)
2759 .Case("za0.d", AArch64::ZAD0)
2760 .Case("za1.d", AArch64::ZAD1)
2761 .Case("za2.d", AArch64::ZAD2)
2762 .Case("za3.d", AArch64::ZAD3)
2763 .Case("za4.d", AArch64::ZAD4)
2764 .Case("za5.d", AArch64::ZAD5)
2765 .Case("za6.d", AArch64::ZAD6)
2766 .Case("za7.d", AArch64::ZAD7)
2767 .Case("za0.s", AArch64::ZAS0)
2768 .Case("za1.s", AArch64::ZAS1)
2769 .Case("za2.s", AArch64::ZAS2)
2770 .Case("za3.s", AArch64::ZAS3)
2771 .Case("za0.h", AArch64::ZAH0)
2772 .Case("za1.h", AArch64::ZAH1)
2773 .Case("za0.b", AArch64::ZAB0)
2774 .Case("za0h.q", AArch64::ZAQ0)
2775 .Case("za1h.q", AArch64::ZAQ1)
2776 .Case("za2h.q", AArch64::ZAQ2)
2777 .Case("za3h.q", AArch64::ZAQ3)
2778 .Case("za4h.q", AArch64::ZAQ4)
2779 .Case("za5h.q", AArch64::ZAQ5)
2780 .Case("za6h.q", AArch64::ZAQ6)
2781 .Case("za7h.q", AArch64::ZAQ7)
2782 .Case("za8h.q", AArch64::ZAQ8)
2783 .Case("za9h.q", AArch64::ZAQ9)
2784 .Case("za10h.q", AArch64::ZAQ10)
2785 .Case("za11h.q", AArch64::ZAQ11)
2786 .Case("za12h.q", AArch64::ZAQ12)
2787 .Case("za13h.q", AArch64::ZAQ13)
2788 .Case("za14h.q", AArch64::ZAQ14)
2789 .Case("za15h.q", AArch64::ZAQ15)
2790 .Case("za0h.d", AArch64::ZAD0)
2791 .Case("za1h.d", AArch64::ZAD1)
2792 .Case("za2h.d", AArch64::ZAD2)
2793 .Case("za3h.d", AArch64::ZAD3)
2794 .Case("za4h.d", AArch64::ZAD4)
2795 .Case("za5h.d", AArch64::ZAD5)
2796 .Case("za6h.d", AArch64::ZAD6)
2797 .Case("za7h.d", AArch64::ZAD7)
2798 .Case("za0h.s", AArch64::ZAS0)
2799 .Case("za1h.s", AArch64::ZAS1)
2800 .Case("za2h.s", AArch64::ZAS2)
2801 .Case("za3h.s", AArch64::ZAS3)
2802 .Case("za0h.h", AArch64::ZAH0)
2803 .Case("za1h.h", AArch64::ZAH1)
2804 .Case("za0h.b", AArch64::ZAB0)
2805 .Case("za0v.q", AArch64::ZAQ0)
2806 .Case("za1v.q", AArch64::ZAQ1)
2807 .Case("za2v.q", AArch64::ZAQ2)
2808 .Case("za3v.q", AArch64::ZAQ3)
2809 .Case("za4v.q", AArch64::ZAQ4)
2810 .Case("za5v.q", AArch64::ZAQ5)
2811 .Case("za6v.q", AArch64::ZAQ6)
2812 .Case("za7v.q", AArch64::ZAQ7)
2813 .Case("za8v.q", AArch64::ZAQ8)
2814 .Case("za9v.q", AArch64::ZAQ9)
2815 .Case("za10v.q", AArch64::ZAQ10)
2816 .Case("za11v.q", AArch64::ZAQ11)
2817 .Case("za12v.q", AArch64::ZAQ12)
2818 .Case("za13v.q", AArch64::ZAQ13)
2819 .Case("za14v.q", AArch64::ZAQ14)
2820 .Case("za15v.q", AArch64::ZAQ15)
2821 .Case("za0v.d", AArch64::ZAD0)
2822 .Case("za1v.d", AArch64::ZAD1)
2823 .Case("za2v.d", AArch64::ZAD2)
2824 .Case("za3v.d", AArch64::ZAD3)
2825 .Case("za4v.d", AArch64::ZAD4)
2826 .Case("za5v.d", AArch64::ZAD5)
2827 .Case("za6v.d", AArch64::ZAD6)
2828 .Case("za7v.d", AArch64::ZAD7)
2829 .Case("za0v.s", AArch64::ZAS0)
2830 .Case("za1v.s", AArch64::ZAS1)
2831 .Case("za2v.s", AArch64::ZAS2)
2832 .Case("za3v.s", AArch64::ZAS3)
2833 .Case("za0v.h", AArch64::ZAH0)
2834 .Case("za1v.h", AArch64::ZAH1)
2835 .Case("za0v.b", AArch64::ZAB0)
2836 .Default(0);
2837}
2838
2839bool AArch64AsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
2840 SMLoc &EndLoc) {
2841 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2842}
2843
2844OperandMatchResultTy AArch64AsmParser::tryParseRegister(MCRegister &RegNo,
2845 SMLoc &StartLoc,
2846 SMLoc &EndLoc) {
2847 StartLoc = getLoc();
2848 auto Res = tryParseScalarRegister(RegNo);
2849 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2850 return Res;
2851}
2852
2853// Matches a register name or register alias previously defined by '.req'
2854unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2855 RegKind Kind) {
2856 unsigned RegNum = 0;
2857 if ((RegNum = matchSVEDataVectorRegName(Name)))
2858 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2859
2860 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2861 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2862
2863 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2864 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2865
2866 if ((RegNum = MatchNeonVectorRegName(Name)))
2867 return Kind == RegKind::NeonVector ? RegNum : 0;
2868
2869 if ((RegNum = matchMatrixRegName(Name)))
2870 return Kind == RegKind::Matrix ? RegNum : 0;
2871
2872 if (Name.equals_insensitive("zt0"))
2873 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2874
2875 // The parsed register must be of RegKind Scalar
2876 if ((RegNum = MatchRegisterName(Name)))
2877 return (Kind == RegKind::Scalar) ? RegNum : 0;
2878
2879 if (!RegNum) {
2880 // Handle a few common aliases of registers.
2881 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2882 .Case("fp", AArch64::FP)
2883 .Case("lr", AArch64::LR)
2884 .Case("x31", AArch64::XZR)
2885 .Case("w31", AArch64::WZR)
2886 .Default(0))
2887 return Kind == RegKind::Scalar ? RegNum : 0;
2888
2889 // Check for aliases registered via .req. Canonicalize to lower case.
2890 // That's more consistent since register names are case insensitive, and
2891 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2892 auto Entry = RegisterReqs.find(Name.lower());
2893 if (Entry == RegisterReqs.end())
2894 return 0;
2895
2896 // set RegNum if the match is the right kind of register
2897 if (Kind == Entry->getValue().first)
2898 RegNum = Entry->getValue().second;
2899 }
2900 return RegNum;
2901}
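// Editor's usage note (illustrative): after a directive such as
// "foo .req x4", matchRegisterNameAlias("foo", RegKind::Scalar) resolves to
// AArch64::X4 through RegisterReqs, while a kind mismatch (for example
// RegKind::NeonVector) deliberately yields 0.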
2902
2903unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2904 switch (K) {
2905 case RegKind::Scalar:
2906 case RegKind::NeonVector:
2907 case RegKind::SVEDataVector:
2908 return 32;
2909 case RegKind::Matrix:
2910 case RegKind::SVEPredicateVector:
2911 case RegKind::SVEPredicateAsCounter:
2912 return 16;
2913 case RegKind::LookupTable:
2914 return 1;
2915 }
2916 llvm_unreachable("Unsupported RegKind");
2917}
2918
2919/// tryParseScalarRegister - Try to parse a register name. The token must be an
2920/// Identifier when called, and if it is a register name the token is eaten and
2921/// the register is added to the operand list.
2922OperandMatchResultTy
2923AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2924 const AsmToken &Tok = getTok();
2925 if (Tok.isNot(AsmToken::Identifier))
2926 return MatchOperand_NoMatch;
2927
2928 std::string lowerCase = Tok.getString().lower();
2929 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2930 if (Reg == 0)
2931 return MatchOperand_NoMatch;
2932
2933 RegNum = Reg;
2934 Lex(); // Eat identifier token.
2935 return MatchOperand_Success;
2936}
2937
2938/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2939OperandMatchResultTy
2940AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2941 SMLoc S = getLoc();
2942
2943 if (getTok().isNot(AsmToken::Identifier)) {
2944 Error(S, "Expected cN operand where 0 <= N <= 15");
2945 return MatchOperand_ParseFail;
2946 }
2947
2948 StringRef Tok = getTok().getIdentifier();
2949 if (Tok[0] != 'c' && Tok[0] != 'C') {
2950 Error(S, "Expected cN operand where 0 <= N <= 15");
2951 return MatchOperand_ParseFail;
2952 }
2953
2954 uint32_t CRNum;
2955 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2956 if (BadNum || CRNum > 15) {
2957 Error(S, "Expected cN operand where 0 <= N <= 15");
2958 return MatchOperand_ParseFail;
2959 }
2960
2961 Lex(); // Eat identifier token.
2962 Operands.push_back(
2963 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2964 return MatchOperand_Success;
2965}
2966
2967// Either an identifier for named values or a 6-bit immediate.
2968OperandMatchResultTy
2969AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
2970 SMLoc S = getLoc();
2971 const AsmToken &Tok = getTok();
2972
2973 unsigned MaxVal = 63;
2974
2975 // Immediate case, with optional leading hash:
2976 if (parseOptionalToken(AsmToken::Hash) ||
2977 Tok.is(AsmToken::Integer)) {
2978 const MCExpr *ImmVal;
2979 if (getParser().parseExpression(ImmVal))
2980 return MatchOperand_ParseFail;
2981
2982 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2983 if (!MCE) {
2984 TokError("immediate value expected for prefetch operand");
2985 return MatchOperand_ParseFail;
2986 }
2987 unsigned prfop = MCE->getValue();
2988 if (prfop > MaxVal) {
2989 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2990 "] expected");
2992 }
2993
2994 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
2995 Operands.push_back(AArch64Operand::CreatePrefetch(
2996 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
2997 return MatchOperand_Success;
2998 }
2999
3000 if (Tok.isNot(AsmToken::Identifier)) {
3001 TokError("prefetch hint expected");
3002 return MatchOperand_ParseFail;
3003 }
3004
3005 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3006 if (!RPRFM) {
3007 TokError("prefetch hint expected");
3008 return MatchOperand_ParseFail;
3009 }
3010
3011 Operands.push_back(AArch64Operand::CreatePrefetch(
3012 RPRFM->Encoding, Tok.getString(), S, getContext()));
3013 Lex(); // Eat identifier token.
3014 return MatchOperand_Success;
3015}
3016
3017/// tryParsePrefetch - Try to parse a prefetch operand.
3018template <bool IsSVEPrefetch>
3019OperandMatchResultTy
3020AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3021 SMLoc S = getLoc();
3022 const AsmToken &Tok = getTok();
3023
3024 auto LookupByName = [](StringRef N) {
3025 if (IsSVEPrefetch) {
3026 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3027 return std::optional<unsigned>(Res->Encoding);
3028 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3029 return std::optional<unsigned>(Res->Encoding);
3030 return std::optional<unsigned>();
3031 };
3032
3033 auto LookupByEncoding = [](unsigned E) {
3034 if (IsSVEPrefetch) {
3035 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3036 return std::optional<StringRef>(Res->Name);
3037 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3038 return std::optional<StringRef>(Res->Name);
3039 return std::optional<StringRef>();
3040 };
3041 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3042
3043 // Either an identifier for named values or a 5-bit immediate.
3044 // Eat optional hash.
3045 if (parseOptionalToken(AsmToken::Hash) ||
3046 Tok.is(AsmToken::Integer)) {
3047 const MCExpr *ImmVal;
3048 if (getParser().parseExpression(ImmVal))
3049 return MatchOperand_ParseFail;
3050
3051 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3052 if (!MCE) {
3053 TokError("immediate value expected for prefetch operand");
3054 return MatchOperand_ParseFail;
3055 }
3056 unsigned prfop = MCE->getValue();
3057 if (prfop > MaxVal) {
3058 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3059 "] expected");
3061 }
3062
3063 auto PRFM = LookupByEncoding(MCE->getValue());
3064 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3065 S, getContext()));
3066 return MatchOperand_Success;
3067 }
3068
3069 if (Tok.isNot(AsmToken::Identifier)) {
3070 TokError("prefetch hint expected");
3071 return MatchOperand_ParseFail;
3072 }
3073
3074 auto PRFM = LookupByName(Tok.getString());
3075 if (!PRFM) {
3076 TokError("prefetch hint expected");
3077 return MatchOperand_ParseFail;
3078 }
3079
3080 Operands.push_back(AArch64Operand::CreatePrefetch(
3081 *PRFM, Tok.getString(), S, getContext()));
3082 Lex(); // Eat identifier token.
3083 return MatchOperand_Success;
3084}
3085
3086/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3087OperandMatchResultTy
3088AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3089 SMLoc S = getLoc();
3090 const AsmToken &Tok = getTok();
3091 if (Tok.isNot(AsmToken::Identifier)) {
3092 TokError("invalid operand for instruction");
3093 return MatchOperand_ParseFail;
3094 }
3095
3096 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3097 if (!PSB) {
3098 TokError("invalid operand for instruction");
3099 return MatchOperand_ParseFail;
3100 }
3101
3102 Operands.push_back(AArch64Operand::CreatePSBHint(
3103 PSB->Encoding, Tok.getString(), S, getContext()));
3104 Lex(); // Eat identifier token.
3105 return MatchOperand_Success;
3106}
3107
3108OperandMatchResultTy
3109AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3110 SMLoc StartLoc = getLoc();
3111
3112 MCRegister RegNum;
3113
3114 // The case where xzr, xzr is not present is handled by an InstAlias.
3115
3116 auto RegTok = getTok(); // in case we need to backtrack
3117 if (tryParseScalarRegister(RegNum) != MatchOperand_Success)
3118 return MatchOperand_NoMatch;
3119
3120 if (RegNum != AArch64::XZR) {
3121 getLexer().UnLex(RegTok);
3122 return MatchOperand_NoMatch;
3123 }
3124
3125 if (parseComma())
3126 return MatchOperand_ParseFail;
3127
3128 if (tryParseScalarRegister(RegNum) != MatchOperand_Success) {
3129 TokError("expected register operand");
3130 return MatchOperand_ParseFail;
3131 }
3132
3133 if (RegNum != AArch64::XZR) {
3134 TokError("xzr must be followed by xzr");
3135 return MatchOperand_ParseFail;
3136 }
3137
3138 // We need to push something, since we claim this is an operand in .td.
3139 // See also AArch64AsmParser::parseKeywordOperand.
3140 Operands.push_back(AArch64Operand::CreateReg(
3141 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3142
3143 return MatchOperand_Success;
3144}
3145
3146/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3147OperandMatchResultTy
3148AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3149 SMLoc S = getLoc();
3150 const AsmToken &Tok = getTok();
3151 if (Tok.isNot(AsmToken::Identifier)) {
3152 TokError("invalid operand for instruction");
3153 return MatchOperand_ParseFail;
3154 }
3155
3156 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3157 if (!BTI) {
3158 TokError("invalid operand for instruction");
3159 return MatchOperand_ParseFail;
3160 }
3161
3162 Operands.push_back(AArch64Operand::CreateBTIHint(
3163 BTI->Encoding, Tok.getString(), S, getContext()));
3164 Lex(); // Eat identifier token.
3165 return MatchOperand_Success;
3166}
3167
3168/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3169/// instruction.
3170OperandMatchResultTy
3171AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3172 SMLoc S = getLoc();
3173 const MCExpr *Expr = nullptr;
3174
3175 if (getTok().is(AsmToken::Hash)) {
3176 Lex(); // Eat hash token.
3177 }
3178
3179 if (parseSymbolicImmVal(Expr))
3180 return MatchOperand_ParseFail;
3181
3182 AArch64MCExpr::VariantKind ELFRefKind;
3183 MCSymbolRefExpr::VariantKind DarwinRefKind;
3184 int64_t Addend;
3185 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3186 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3187 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3188 // No modifier was specified at all; this is the syntax for an ELF basic
3189 // ADRP relocation (unfortunately).
3190 Expr =
3191 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3192 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3193 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3194 Addend != 0) {
3195 Error(S, "gotpage label reference not allowed an addend");
3196 return MatchOperand_ParseFail;
3197 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3198 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3199 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3200 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3201 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3202 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3203 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3204 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3205 // The operand must be an @page or @gotpage qualified symbolref.
3206 Error(S, "page or gotpage label reference expected");
3207 return MatchOperand_ParseFail;
3208 }
3209 }
3210
3211 // We have either a label reference possibly with addend or an immediate. The
3212 // addend is a raw value here. The linker will adjust it to only reference the
3213 // page.
3214 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3215 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3216
3217 return MatchOperand_Success;
3218}
3219
3220/// tryParseAdrLabel - Parse and validate a source label for the ADR
3221/// instruction.
3222OperandMatchResultTy
3223AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3224 SMLoc S = getLoc();
3225 const MCExpr *Expr = nullptr;
3226
3227 // Leave anything with a bracket to the default for SVE
3228 if (getTok().is(AsmToken::LBrac))
3229 return MatchOperand_NoMatch;
3230
3231 if (getTok().is(AsmToken::Hash))
3232 Lex(); // Eat hash token.
3233
3234 if (parseSymbolicImmVal(Expr))
3235 return MatchOperand_ParseFail;
3236
3237 AArch64MCExpr::VariantKind ELFRefKind;
3238 MCSymbolRefExpr::VariantKind DarwinRefKind;
3239 int64_t Addend;
3240 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3241 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3242 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3243 // No modifier was specified at all; this is the syntax for an ELF basic
3244 // ADR relocation (unfortunately).
3245 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3246 } else {
3247 Error(S, "unexpected adr label");
3248 return MatchOperand_ParseFail;
3249 }
3250 }
3251
3252 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3253 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3254 return MatchOperand_Success;
3255}
3256
3257/// tryParseFPImm - A floating point immediate expression operand.
3258template<bool AddFPZeroAsLiteral>
3259OperandMatchResultTy
3260AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3261 SMLoc S = getLoc();
3262
3263 bool Hash = parseOptionalToken(AsmToken::Hash);
3264
3265 // Handle negation, as that still comes through as a separate token.
3266 bool isNegative = parseOptionalToken(AsmToken::Minus);
3267
3268 const AsmToken &Tok = getTok();
3269 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3270 if (!Hash)
3271 return MatchOperand_NoMatch;
3272 TokError("invalid floating point immediate");
3273 return MatchOperand_ParseFail;
3274 }
3275
3276 // Parse hexadecimal representation.
3277 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
3278 if (Tok.getIntVal() > 255 || isNegative) {
3279 TokError("encoded floating point value out of range");
3280 return MatchOperand_ParseFail;
3281 }
3282
3283 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3284 Operands.push_back(
3285 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3286 } else {
3287 // Parse FP representation.
3288 APFloat RealVal(APFloat::IEEEdouble());
3289 auto StatusOrErr =
3290 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3291 if (errorToBool(StatusOrErr.takeError())) {
3292 TokError("invalid floating point representation");
3293 return MatchOperand_ParseFail;
3294 }
3295
3296 if (isNegative)
3297 RealVal.changeSign();
3298
3299 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3300 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3301 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3302 } else
3303 Operands.push_back(AArch64Operand::CreateFPImm(
3304 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3305 }
3306
3307 Lex(); // Eat the token.
3308
3309 return MatchOperand_Success;
3310}
3311
3312/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3313/// a shift suffix, for example '#1, lsl #12'.
3314OperandMatchResultTy
3315AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3316 SMLoc S = getLoc();
3317
3318 if (getTok().is(AsmToken::Hash))
3319 Lex(); // Eat '#'
3320 else if (getTok().isNot(AsmToken::Integer))
3321 // Operand should start from # or should be integer, emit error otherwise.
3322 return MatchOperand_NoMatch;
3323
3324 if (getTok().is(AsmToken::Integer) &&
3325 getLexer().peekTok().is(AsmToken::Colon))
3326 return tryParseImmRange(Operands);
3327
3328 const MCExpr *Imm = nullptr;
3329 if (parseSymbolicImmVal(Imm))
3330 return MatchOperand_ParseFail;
3331 else if (getTok().isNot(AsmToken::Comma)) {
3332 Operands.push_back(
3333 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3334 return MatchOperand_Success;
3335 }
3336
3337 // Eat ','
3338 Lex();
3339 StringRef VecGroup;
3340 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3341 Operands.push_back(
3342 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3343 Operands.push_back(
3344 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3345 return MatchOperand_Success;
3346 }
3347
3348 // The optional operand must be "lsl #N" where N is non-negative.
3349 if (!getTok().is(AsmToken::Identifier) ||
3350 !getTok().getIdentifier().equals_insensitive("lsl")) {
3351 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3352 return MatchOperand_ParseFail;
3353 }
3354
3355 // Eat 'lsl'
3356 Lex();
3357
3358 parseOptionalToken(AsmToken::Hash);
3359
3360 if (getTok().isNot(AsmToken::Integer)) {
3361 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3362 return MatchOperand_ParseFail;
3363 }
3364
3365 int64_t ShiftAmount = getTok().getIntVal();
3366
3367 if (ShiftAmount < 0) {
3368 Error(getLoc(), "positive shift amount required");
3369 return MatchOperand_ParseFail;
3370 }
3371 Lex(); // Eat the number
3372
3373 // Just in case the optional lsl #0 is used for immediates other than zero.
3374 if (ShiftAmount == 0 && Imm != nullptr) {
3375 Operands.push_back(
3376 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3377 return MatchOperand_Success;
3378 }
3379
3380 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3381 getLoc(), getContext()));
3382 return MatchOperand_Success;
3383}
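// Editor's worked example (illustrative): "#1, lsl #12" produces a
// k_ShiftedImm operand (value 1, shift 12); "#1, lsl #0" and a plain "#1"
// both fall back to an ordinary k_Immediate operand, as handled above.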
3384
3385/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3386/// suggestion to help common typos.
3387AArch64CC::CondCode
3388AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3389 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3390 .Case("eq", AArch64CC::EQ)
3391 .Case("ne", AArch64CC::NE)
3392 .Case("cs", AArch64CC::HS)
3393 .Case("hs", AArch64CC::HS)
3394 .Case("cc", AArch64CC::LO)
3395 .Case("lo", AArch64CC::LO)
3396 .Case("mi", AArch64CC::MI)
3397 .Case("pl", AArch64CC::PL)
3398 .Case("vs", AArch64CC::VS)
3399 .Case("vc", AArch64CC::VC)
3400 .Case("hi", AArch64CC::HI)
3401 .Case("ls", AArch64CC::LS)
3402 .Case("ge", AArch64CC::GE)
3403 .Case("lt", AArch64CC::LT)
3404 .Case("gt", AArch64CC::GT)
3405 .Case("le", AArch64CC::LE)
3406 .Case("al", AArch64CC::AL)
3407 .Case("nv", AArch64CC::NV)
3409
3410 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3411 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3412 .Case("none", AArch64CC::EQ)
3413 .Case("any", AArch64CC::NE)
3414 .Case("nlast", AArch64CC::HS)
3415 .Case("last", AArch64CC::LO)
3416 .Case("first", AArch64CC::MI)
3417 .Case("nfrst", AArch64CC::PL)
3418 .Case("pmore", AArch64CC::HI)
3419 .Case("plast", AArch64CC::LS)
3420 .Case("tcont", AArch64CC::GE)
3421 .Case("tstop", AArch64CC::LT)
3423
3424 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3425 Suggestion = "nfrst";
3426 }
3427 return CC;
3428}
3429
3430/// parseCondCode - Parse a Condition Code operand.
3431bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3432 bool invertCondCode) {
3433 SMLoc S = getLoc();
3434 const AsmToken &Tok = getTok();
3435 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3436
3437 StringRef Cond = Tok.getString();
3438 std::string Suggestion;
3439 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3440 if (CC == AArch64CC::Invalid) {
3441 std::string Msg = "invalid condition code";
3442 if (!Suggestion.empty())
3443 Msg += ", did you mean " + Suggestion + "?";
3444 return TokError(Msg);
3445 }
3446 Lex(); // Eat identifier token.
3447
3448 if (invertCondCode) {
3449 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3450 return TokError("condition codes AL and NV are invalid for this instruction");
3451 CC = AArch64CC::getInvertedCondCode(CC);
3452 }
3453
3454 Operands.push_back(
3455 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3456 return false;
3457}
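// Editor's note (illustrative): "cs"/"hs" and "cc"/"lo" parse to the same
// condition codes (HS and LO), and when invertCondCode is set the parsed
// code is flipped (e.g. EQ becomes NE) so that inverted-condition aliases
// such as cinc/cinv/cneg can reuse the base matcher.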
3458
3459OperandMatchResultTy
3460AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3461 const AsmToken &Tok = getTok();
3462 SMLoc S = getLoc();
3463
3464 if (Tok.isNot(AsmToken::Identifier)) {
3465 TokError("invalid operand for instruction");
3466 return MatchOperand_ParseFail;
3467 }
3468
3469 unsigned PStateImm = -1;
3470 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3471 if (!SVCR)
3472 return MatchOperand_NoMatch;
3473 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3474 PStateImm = SVCR->Encoding;
3475
3476 Operands.push_back(
3477 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3478 Lex(); // Eat identifier token.
3479 return MatchOperand_Success;
3480}
3481
3482OperandMatchResultTy
3483AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3484 const AsmToken &Tok = getTok();
3485 SMLoc S = getLoc();
3486
3487 StringRef Name = Tok.getString();
3488
3489 if (Name.equals_insensitive("za") || Name.startswith_insensitive("za.")) {
3490 Lex(); // eat "za[.(b|h|s|d)]"
3491 unsigned ElementWidth = 0;
3492 auto DotPosition = Name.find('.');
3493 if (DotPosition != StringRef::npos) {
3494 const auto &KindRes =
3495 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3496 if (!KindRes) {
3497 TokError(
3498 "Expected the register to be followed by element width suffix");
3500 }
3501 ElementWidth = KindRes->second;
3502 }
3503 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3504 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3505 getContext()));
3506 if (getLexer().is(AsmToken::LBrac)) {
3507 // There's no comma after matrix operand, so we can parse the next operand
3508 // immediately.
3509 if (parseOperand(Operands, false, false))
3510 return MatchOperand_NoMatch;
3511 }
3512 return MatchOperand_Success;
3513 }
3514
3515 // Try to parse matrix register.
3516 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3517 if (!Reg)
3518 return MatchOperand_NoMatch;
3519
3520 size_t DotPosition = Name.find('.');
3521 assert(DotPosition != StringRef::npos && "Unexpected register");
3522
3523 StringRef Head = Name.take_front(DotPosition);
3524 StringRef Tail = Name.drop_front(DotPosition);
3525 StringRef RowOrColumn = Head.take_back();
3526
3527 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3528 .Case("h", MatrixKind::Row)
3529 .Case("v", MatrixKind::Col)
3530 .Default(MatrixKind::Tile);
3531
3532 // Next up, parsing the suffix
3533 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3534 if (!KindRes) {
3535 TokError("Expected the register to be followed by element width suffix");
3536 return MatchOperand_ParseFail;
3537 }
3538 unsigned ElementWidth = KindRes->second;
3539
3540 Lex();
3541
3542 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3543 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3544
3545 if (getLexer().is(AsmToken::LBrac)) {
3546 // There's no comma after matrix operand, so we can parse the next operand
3547 // immediately.
3548 if (parseOperand(Operands, false, false))
3549 return MatchOperand_NoMatch;
3550 }
3551 return MatchOperand_Success;
3552}
3553
3554/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3555/// them if present.
3556OperandMatchResultTy
3557AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3558 const AsmToken &Tok = getTok();
3559 std::string LowerID = Tok.getString().lower();
3560 AArch64_AM::ShiftExtendType ShOp =
3561 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3562 .Case("lsl", AArch64_AM::LSL)
3563 .Case("lsr", AArch64_AM::LSR)
3564 .Case("asr", AArch64_AM::ASR)
3565 .Case("ror", AArch64_AM::ROR)
3566 .Case("msl", AArch64_AM::MSL)
3567 .Case("uxtb", AArch64_AM::UXTB)
3568 .Case("uxth", AArch64_AM::UXTH)
3569 .Case("uxtw", AArch64_AM::UXTW)
3570 .Case("uxtx", AArch64_AM::UXTX)
3571 .Case("sxtb", AArch64_AM::SXTB)
3572 .Case("sxth", AArch64_AM::SXTH)
3573 .Case("sxtw", AArch64_AM::SXTW)
3574 .Case("sxtx", AArch64_AM::SXTX)
3576
3578 return MatchOperand_NoMatch;
3579
3580 SMLoc S = Tok.getLoc();
3581 Lex();
3582
3583 bool Hash = parseOptionalToken(AsmToken::Hash);
3584
3585 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3586 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3587 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3588 ShOp == AArch64_AM::MSL) {
3589 // We expect a number here.
3590 TokError("expected #imm after shift specifier");
3592 }
3593
3594 // "extend" type operations don't need an immediate, #0 is implicit.
3595 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3596 Operands.push_back(
3597 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3598 return MatchOperand_Success;
3599 }
3600
3601 // Make sure we do actually have a number, identifier or a parenthesized
3602 // expression.
3603 SMLoc E = getLoc();
3604 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3605 !getTok().is(AsmToken::Identifier)) {
3606 Error(E, "expected integer shift amount");
3608 }
3609
3610 const MCExpr *ImmVal;
3611 if (getParser().parseExpression(ImmVal))
3613
3614 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3615 if (!MCE) {
3616 Error(E, "expected constant '#imm' after shift specifier");
3618 }
3619
3620 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3621 Operands.push_back(AArch64Operand::CreateShiftExtend(
3622 ShOp, MCE->getValue(), true, S, E, getContext()));
3623 return MatchOperand_Success;
3624}
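
// Note (examples are illustrative): operands accepted here include
//   add x0, x1, x2, lsl #4     // shift type, immediate required
//   add x0, x1, w2, uxtw       // extend type, implicit #0 amount
//   add x0, x1, w2, sxtw #2    // extend type, explicit amount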

static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    {"gcs", {AArch64::FeatureGCS}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};

static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    SmallVector<std::string, 2> ExtMatches;
    for (const auto &Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}

void AArch64AsmParser::createSysAlias(uint16_t Encoding,
                                      OperandVector &Operands, SMLoc S) {
  const uint16_t Op2 = Encoding & 7;
  const uint16_t Cm = (Encoding & 0x78) >> 3;
  const uint16_t Cn = (Encoding & 0x780) >> 7;
  const uint16_t Op1 = (Encoding & 0x3800) >> 11;

  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());

  Operands.push_back(
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
  Operands.push_back(
      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
  Operands.push_back(
      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
  Expr = MCConstantExpr::create(Op2, getContext());
  Operands.push_back(
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
}
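
// Note (illustrative, derived from the field extraction above): the 14-bit
// encoding packs the SYS operand fields as
//   [13:11] op1 | [10:7] Cn | [6:3] Cm | [2:0] op2
// and the operands pushed here spell out the equivalent
//   sys #op1, C<n>, C<m>, #op2{, Xt}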

/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
             Mnemonic == "cosp") {

    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    uint16_t PRCTX_Op2 = Mnemonic == "cfp"    ? 0b100
                         : Mnemonic == "dvp"  ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp"  ? 0b111
                                              : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
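
// Note (examples are illustrative, not from the upstream source): after this
// rewrite an alias assembles as a plain SYS instruction, e.g.
//   ic ialluis      -> sys #0, c7, c1, #0
//   dc zva, x0      -> sys #3, c7, c4, #1, x0
//   cfp rctx, x1    -> sys #3, c7, c3, #4, x1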

/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    bool HasnXSQualifier = Op.endswith_insensitive("nXS");
    if (HasnXSQualifier) {
      Op = Op.drop_back(3);
    }
    const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBIorig)
      return TokError("invalid operand for TLBIP instruction");
    const AArch64TLBI::TLBI TLBI(
        TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
        TLBIorig->NeedsReg,
        HasnXSQualifier
            ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
            : TLBIorig->FeaturesRequired);
    if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
      std::string Name =
          std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
      std::string Str("TLBIP " + Name + " requires: ");
      setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI.Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result == MatchOperand_NoMatch)
    Result = tryParseGPRSeqPair(Operands);
  if (Result != MatchOperand_Success)
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
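
// Note (example is illustrative): ORing (1 << 7) into the encoding flips the
// low bit of the Cn field (bits [10:7]), which is how the nXS-qualified form
// of a TLBI op (CRn=9) is derived from its plain counterpart (CRn=8), e.g.
//   tlbip vae1, x0, x1       // CRn = c8
//   tlbip vae1nxs, x0, x1    // CRn = c9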

OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. We deliberately don't unlex the optional '#', as it is not
      // needed to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
    // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" &&
             (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
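
// Note (examples are illustrative): barrier operands are either a named
// option or a raw 4-bit CRm immediate, e.g.
//   dmb ish      // named option
//   dsb sy       // named option
//   dsb #12      // raw immediate; printed back as a name only if one exists
//   tsb csync    // the single option TSB accepts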

OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}

OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return MatchOperand_NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
  if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
    if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Lex(); // Eat identifier

  return MatchOperand_Success;
}
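
// Note (examples are illustrative): this accepts named system registers, the
// generic S<op0>_<op1>_C<n>_C<m>_<op2> spelling, and PSTATE fields, e.g.
//   mrs x0, midr_el1        // named, read-only
//   mrs x0, s3_0_c0_c0_0    // the same register, generic spelling
//   msr pan, #1             // PSTATE field, immediate form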

/// tryParseNeonVectorRegister - Parse a vector register operand.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  MCRegister Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}

OperandMatchResultTy
AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
  SMLoc SIdx = getLoc();
  if (parseOptionalToken(AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }

    SMLoc E = getLoc();

    if (parseToken(AsmToken::RBrac, "']' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
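
// Note (example is illustrative): the index parsed here is the lane selector
// in operands such as
//   mov w0, v1.s[2]    // "[2]" becomes a vector-index operand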

// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}

/// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
template <RegKind RK> OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for an SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex == MatchOperand_Success)
        return MatchOperand_Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") {
    Error(getLoc(), "expecting 'z' predication");
    return MatchOperand_ParseFail;
  }

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}

/// parseRegister - Parse a register operand.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  // Try for a Neon vector register.
  if (!tryParseNeonVectorRegister(Operands))
    return false;

  if (tryParseZTOperand(Operands) == MatchOperand_Success)
    return false;

  // Otherwise try for a scalar register.
  if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
    return false;

  return true;
}

bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
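
// Note (examples are illustrative): the ":specifier:" prefix handled above is
// how relocation operators reach the parser, e.g.
//   add  x0, x0, :lo12:sym    // VK_LO12
//   movz x0, #:abs_g3:sym     // VK_ABS_G3
//   adrp x0, :got:sym         // VK_GOT_PAGE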

OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
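
// Note (examples are illustrative): tile lists appear in SME instructions
// such as
//   zero {za0.s, za1.s}    // two 32-bit tiles
//   zero {za}              // alias for everything, RegMask = 0xFF
// Each named tile is widened to the 64-bit ZAD tiles it overlaps before the
// mask is computed.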

template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch &&
        RegTok.getString().equals_insensitive("zt0"))
      return MatchOperand_NoMatch;

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any kind suffixes must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  } else {
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any kind suffixes must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
        Error(Loc, "registers must have the same sequential stride");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
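
// Note (examples are illustrative): all of these list forms funnel through
// the code above:
//   { v0.8b, v1.8b }    // Neon list, stride 1
//   { z0.d - z3.d }     // SVE range form via the '-' path
//   { z0.s, z8.s }      // SME2 strided list, computed stride 8
// The modulo arithmetic allows wrapping past the last register, e.g.
//   { v31.4s, v0.4s, v1.4s }.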

/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
  if (ParseRes != MatchOperand_Success)
    return true;

  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}

OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}

OperandMatchResultTy
AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat identifier token.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
  }

  return MatchOperand_Success;
}

template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}

bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive("vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
  if (!getTok().getString().equals_insensitive("mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
            getContext()));
        return MatchOperand_Success;
      }
  }

  return Error(getLoc(), "expected 'vl' or '#<imm>'");
}
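
// Note (examples are illustrative): the "mul" decorations parsed above occur
// in SVE forms such as
//   ld1d { z0.d }, p0/z, [x0, #1, mul vl]    // "mul vl" token pair
//   cnth x0, all, mul #8                     // "mul #<imm>" form
// Returning MatchOperand_Success (which is 0) from this bool-returning
// function reads as "no error".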

bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
                                              StringRef &VecGroup) {
  MCAsmParser &Parser = getParser();
  auto Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;

  StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
                     .Case("vgx2", "vgx2")
                     .Case("vgx4", "vgx4")
                     .Default("");

  if (VG.empty())
    return true;

  VecGroup = VG;
  Parser.Lex(); // Eat vgx[2|4]
  return false;
}

bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
  auto Tok = getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;

  auto Keyword = Tok.getString();
  Keyword = StringSwitch<StringRef>(Keyword.lower())
                .Case("sm", "sm")
                .Case("za", "za")
                .Default(Keyword);
  Operands.push_back(
      AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));

  Lex();
  return false;
}
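
// Note (examples are illustrative): keyword operands cover the second word of
// two-word mnemonics, e.g.
//   smstart sm    // "sm" is parsed here as a plain token
//   smstop za
//   brb iall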

/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))