//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
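
  // Illustrative note (a sketch, not from the original source): a MOVPRFX
  // must immediately precede the destructive SVE instruction it prefixes,
  // and the prefix destination must match the instruction's, e.g.:
  //   movprfx z0, z7
  //   add     z0.d, p0/m, z0.d, z1.d   // OK: same Zd as the prefix
  // NextPrefix carries this state between instructions so the pairing can
  // be checked when the following instruction is validated.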

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");
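
    // For example (a sketch, not from the original source), after the
    // aliases above these two directives emit identical bytes:
    //   .hword 0x1234
    //   .2byte 0x1234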

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
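    //
    // Illustrative example (not from the original source): in
    //   ldr x0, [x1, w2, sxtw #3]
    // "w2, sxtw #3" is parsed as one register operand whose ShiftExtend
    // records SXTW with an explicit amount of 3.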
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented
  // as a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
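
  // Worked example (a sketch, not from the original source): with
  // T = int32_t, Upper == 0xFFFFFFFF00000000. A value such as
  // 0xFFFFFFFF0000FFFF has all-1 upper bits, so it passes the check and
  // 0x0000FFFF is tested as a 32-bit logical immediate (a run of 16
  // contiguous ones, so it matches).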

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
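
  // Worked examples for getShiftedVal<12> (a sketch, not from the original
  // source):
  //   "#1, lsl #12" -> (1, 12)   already-shifted immediate
  //   "#0x1000"     -> (1, 12)   unshifted, but divisible by 1 << 12
  //   "#3"          -> (3, 0)    unshifted and not shiftable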

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the
    // fixup/relocation code deal with it.
    return true;
  }
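
  // Illustrative (not from the original source): "add x0, x1, #4095" matches
  // directly; "add x0, x1, #0x1000" matches because getShiftedVal<12>() folds
  // it to "#1, lsl #12"; "#0x1001" fits neither form and is rejected.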

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed or unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
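
  // Illustrative (not from the original source): for a 16-bit element type,
  // "dup z0.h, #-128" and "dup z0.h, #-32768" (i.e. #-128, lsl #8) both
  // match, while #257 does not: it is neither an 8-bit signed value nor a
  // multiple of 256 in range.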

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
  }

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }
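
  // Illustrative (not from the original source): "mov x0, #0x10000" can
  // match the <64, 16> instantiation since 0x10000 == 0x1 << 16, while a
  // purely symbolic operand only matches the Shift == 0 instantiation.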

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }

  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
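
  // Illustrative (not from the original source): with Width = 64,
  // "ldr x0, [x1, #8]" is a legal scaled offset and does not match, while
  // "ldr x0, [x1, #1]" (not a multiple of 8) matches and so falls back to
  // LDUR.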

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0, AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0, AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0, AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0, AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           "NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           "NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1822
1823 template <unsigned NumRegs>
1824 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1825 assert(N == 1 && "Invalid number of operands!");
1826 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1827
1828 switch (NumRegs) {
1829 case 2:
1830 if (getVectorListStart() < AArch64::Z16) {
1831 assert((getVectorListStart() < AArch64::Z8) &&
1832 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1833 Inst.addOperand(MCOperand::createReg(
1834 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1835 } else {
1836 assert((getVectorListStart() < AArch64::Z24) &&
1837 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1838 Inst.addOperand(MCOperand::createReg(
1839 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1840 }
1841 break;
1842 case 4:
1843 if (getVectorListStart() < AArch64::Z16) {
1844 assert((getVectorListStart() < AArch64::Z4) &&
1845 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1846 Inst.addOperand(MCOperand::createReg(
1847 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1848 } else {
1849 assert((getVectorListStart() < AArch64::Z20) &&
1850 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1851 Inst.addOperand(MCOperand::createReg(
1852 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1853 }
1854 break;
1855 default:
1856 llvm_unreachable("Unsupported number of registers for strided vec list");
1857 }
1858 }
1859
1860 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1861 assert(N == 1 && "Invalid number of operands!");
1862 unsigned RegMask = getMatrixTileListRegMask();
1863 assert(RegMask <= 0xFF && "Invalid mask!");
1864 Inst.addOperand(MCOperand::createImm(RegMask));
1865 }
1866
1867 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1868 assert(N == 1 && "Invalid number of operands!");
1869 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1870 }
1871
1872 template <unsigned ImmIs0, unsigned ImmIs1>
1873 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1874 assert(N == 1 && "Invalid number of operands!");
1875 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1876 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1877 }
1878
1879 void addImmOperands(MCInst &Inst, unsigned N) const {
1880 assert(N == 1 && "Invalid number of operands!");
1881 // If this is a pageoff symrefexpr with an addend, adjust the addend
1882 // to be only the page-offset portion. Otherwise, just add the expr
1883 // as-is.
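// For example, with ":lo12:sym+4097" only the low 12 bits of the addend
// (here 1) end up in the final encoded immediate; the remainder belongs to
// the page computed by the matching ADRP.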
1884 addExpr(Inst, getImm());
1885 }
1886
1887 template <int Shift>
1888 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1889 assert(N == 2 && "Invalid number of operands!");
1890 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1891 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1892 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1893 } else if (isShiftedImm()) {
1894 addExpr(Inst, getShiftedImmVal());
1895 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1896 } else {
1897 addExpr(Inst, getImm());
1898 Inst.addOperand(MCOperand::createImm(0));
1899 }
1900 }
1901
1902 template <int Shift>
1903 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1904 assert(N == 2 && "Invalid number of operands!");
1905 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1906 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1907 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1908 } else
1909 llvm_unreachable("Not a shifted negative immediate");
1910 }
1911
1912 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1913 assert(N == 1 && "Invalid number of operands!");
1914 Inst.addOperand(MCOperand::createImm(getCondCode()));
1915 }
1916
1917 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1918 assert(N == 1 && "Invalid number of operands!");
1919 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1920 if (!MCE)
1921 addExpr(Inst, getImm());
1922 else
1923 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1924 }
1925
1926 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1927 addImmOperands(Inst, N);
1928 }
1929
1930 template<int Scale>
1931 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1932 assert(N == 1 && "Invalid number of operands!");
1933 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1934
1935 if (!MCE) {
1936 Inst.addOperand(MCOperand::createExpr(getImm()));
1937 return;
1938 }
1939 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1940 }
1941
1942 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1943 assert(N == 1 && "Invalid number of operands!");
1944 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1945 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1946 }
1947
1948 template <int Scale>
1949 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1950 assert(N == 1 && "Invalid number of operands!");
1951 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1952 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1953 }
1954
1955 template <int Scale>
1956 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!");
1958 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1959 }
1960
1961 template <typename T>
1962 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1963 assert(N == 1 && "Invalid number of operands!");
1964 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1965 std::make_unsigned_t<T> Val = MCE->getValue();
1966 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1967 Inst.addOperand(MCOperand::createImm(encoding));
1968 }
1969
1970 template <typename T>
1971 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1972 assert(N == 1 && "Invalid number of operands!");
1973 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1974 std::make_unsigned_t<T> Val = ~MCE->getValue();
1975 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1976 Inst.addOperand(MCOperand::createImm(encoding));
1977 }
1978
1979 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1980 assert(N == 1 && "Invalid number of operands!");
1981 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1982 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1983 Inst.addOperand(MCOperand::createImm(encoding));
1984 }
1985
1986 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1987 // Branch operands don't encode the low bits, so shift them off
1988 // here. If it's a label, however, just put it on directly as there's
1989 // not enough information now to do anything.
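// For example, a branch whose resolved offset is +8 bytes is encoded with
// an immediate of 2, since AArch64 branch targets are always 4-byte aligned.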
1990 assert(N == 1 && "Invalid number of operands!");
1991 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1992 if (!MCE) {
1993 addExpr(Inst, getImm());
1994 return;
1995 }
1996 assert(MCE && "Invalid constant immediate operand!");
1997 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1998 }
1999
2000 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2001 // Branch operands don't encode the low bits, so shift them off
2002 // here. If it's a label, however, just put it on directly as there's
2003 // not enough information now to do anything.
2004 assert(N == 1 && "Invalid number of operands!");
2005 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2006 if (!MCE) {
2007 addExpr(Inst, getImm());
2008 return;
2009 }
2010 assert(MCE && "Invalid constant immediate operand!");
2011 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2012 }
2013
2014 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2015 // Branch operands don't encode the low bits, so shift them off
2016 // here. If it's a label, however, just put it on directly as there's
2017 // not enough information now to do anything.
2018 assert(N == 1 && "Invalid number of operands!");
2019 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2020 if (!MCE) {
2021 addExpr(Inst, getImm());
2022 return;
2023 }
2024 assert(MCE && "Invalid constant immediate operand!");
2025 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2026 }
2027
2028 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2029 assert(N == 1 && "Invalid number of operands!");
2030 Inst.addOperand(MCOperand::createImm(
2031 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2032 }
2033
2034 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2035 assert(N == 1 && "Invalid number of operands!");
2036 Inst.addOperand(MCOperand::createImm(getBarrier()));
2037 }
2038
2039 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2040 assert(N == 1 && "Invalid number of operands!");
2041 Inst.addOperand(MCOperand::createImm(getBarrier()));
2042 }
2043
2044 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2045 assert(N == 1 && "Invalid number of operands!");
2046
2047 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2048 }
2049
2050 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2051 assert(N == 1 && "Invalid number of operands!");
2052
2053 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2054 }
2055
2056 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2057 assert(N == 1 && "Invalid number of operands!");
2058
2059 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2060 }
2061
2062 void addSVCROperands(MCInst &Inst, unsigned N) const {
2063 assert(N == 1 && "Invalid number of operands!");
2064
2065 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2066 }
2067
2068 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!");
2070
2071 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2072 }
2073
2074 void addSysCROperands(MCInst &Inst, unsigned N) const {
2075 assert(N == 1 && "Invalid number of operands!");
2076 Inst.addOperand(MCOperand::createImm(getSysCR()));
2077 }
2078
2079 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2080 assert(N == 1 && "Invalid number of operands!");
2081 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2082 }
2083
2084 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2085 assert(N == 1 && "Invalid number of operands!");
2086 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2087 }
2088
2089 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2092 }
2093
2094 void addShifterOperands(MCInst &Inst, unsigned N) const {
2095 assert(N == 1 && "Invalid number of operands!");
2096 unsigned Imm =
2097 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2098 Inst.addOperand(MCOperand::createImm(Imm));
2099 }
2100
2101 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2102 assert(N == 1 && "Invalid number of operands!");
2103 unsigned Imm = getShiftExtendAmount();
2104 Inst.addOperand(MCOperand::createImm(Imm));
2105 }
2106
2107 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2108 assert(N == 1 && "Invalid number of operands!");
2109
2110 if (!isScalarReg())
2111 return;
2112
2113 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2114 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2115 .getRegister(RI->getEncodingValue(getReg()));
2116 if (Reg != AArch64::XZR)
2117 llvm_unreachable("wrong register");
2118
2119 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2120 }
2121
2122 void addExtendOperands(MCInst &Inst, unsigned N) const {
2123 assert(N == 1 && "Invalid number of operands!");
2124 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2125 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2126 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2127 Inst.addOperand(MCOperand::createImm(Imm));
2128 }
2129
2130 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2131 assert(N == 1 && "Invalid number of operands!");
2132 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2133 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2134 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2135 Inst.addOperand(MCOperand::createImm(Imm));
2136 }
2137
2138 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2139 assert(N == 2 && "Invalid number of operands!");
2140 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2141 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2142 Inst.addOperand(MCOperand::createImm(IsSigned));
2143 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2144 }
2145
2146 // For 8-bit load/store instructions with a register offset, both the
2147 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2148 // they're disambiguated by whether the shift was explicit or implicit rather
2149 // than its size.
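// For example, "ldrb w0, [x1, x2]" matches the NoShift variant, while the
// explicit "ldrb w0, [x1, x2, lsl #0]" matches the DoShift one, even though
// both shift amounts are zero.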
2150 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2151 assert(N == 2 && "Invalid number of operands!");
2152 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2153 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2154 Inst.addOperand(MCOperand::createImm(IsSigned));
2155 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2156 }
2157
2158 template<int Shift>
2159 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2160 assert(N == 1 && "Invalid number of operands!");
2161
2162 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2163 if (CE) {
2164 uint64_t Value = CE->getValue();
2165 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2166 } else {
2167 addExpr(Inst, getImm());
2168 }
2169 }
2170
2171 template<int Shift>
2172 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2173 assert(N == 1 && "Invalid number of operands!");
2174
2175 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2176 uint64_t Value = CE->getValue();
2177 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2178 }
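// For example, "mov x0, #-2" matches the MOVN alias and encodes
// (~(-2) >> 0) & 0xffff == 1, i.e. the same bits as "movn x0, #1".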
2179
2180 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2181 assert(N == 1 && "Invalid number of operands!");
2182 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2183 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2184 }
2185
2186 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2187 assert(N == 1 && "Invalid number of operands!");
2188 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2189 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2190 }
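// For example, a rotation of #270 encodes as 270 / 90 == 3 in the even form
// (FCMLA) and as (270 - 90) / 180 == 1 in the odd form (FCADD).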
2191
2192 void print(raw_ostream &OS) const override;
2193
2194 static std::unique_ptr<AArch64Operand>
2195 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2196 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2197 Op->Tok.Data = Str.data();
2198 Op->Tok.Length = Str.size();
2199 Op->Tok.IsSuffix = IsSuffix;
2200 Op->StartLoc = S;
2201 Op->EndLoc = S;
2202 return Op;
2203 }
2204
2205 static std::unique_ptr<AArch64Operand>
2206 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2207 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2208 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2209 unsigned ShiftAmount = 0,
2210 unsigned HasExplicitAmount = false) {
2211 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2212 Op->Reg.RegNum = RegNum;
2213 Op->Reg.Kind = Kind;
2214 Op->Reg.ElementWidth = 0;
2215 Op->Reg.EqualityTy = EqTy;
2216 Op->Reg.ShiftExtend.Type = ExtTy;
2217 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2218 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2219 Op->StartLoc = S;
2220 Op->EndLoc = E;
2221 return Op;
2222 }
2223
2224 static std::unique_ptr<AArch64Operand>
2225 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2226 SMLoc S, SMLoc E, MCContext &Ctx,
2227 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2228 unsigned ShiftAmount = 0,
2229 unsigned HasExplicitAmount = false) {
2230 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2231 Kind == RegKind::SVEPredicateVector ||
2232 Kind == RegKind::SVEPredicateAsCounter) &&
2233 "Invalid vector kind");
2234 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2235 HasExplicitAmount);
2236 Op->Reg.ElementWidth = ElementWidth;
2237 return Op;
2238 }
2239
2240 static std::unique_ptr<AArch64Operand>
2241 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2242 unsigned NumElements, unsigned ElementWidth,
2243 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2244 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2245 Op->VectorList.RegNum = RegNum;
2246 Op->VectorList.Count = Count;
2247 Op->VectorList.Stride = Stride;
2248 Op->VectorList.NumElements = NumElements;
2249 Op->VectorList.ElementWidth = ElementWidth;
2250 Op->VectorList.RegisterKind = RegisterKind;
2251 Op->StartLoc = S;
2252 Op->EndLoc = E;
2253 return Op;
2254 }
2255
2256 static std::unique_ptr<AArch64Operand>
2257 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2258 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2259 Op->VectorIndex.Val = Idx;
2260 Op->StartLoc = S;
2261 Op->EndLoc = E;
2262 return Op;
2263 }
2264
2265 static std::unique_ptr<AArch64Operand>
2266 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2267 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2268 Op->MatrixTileList.RegMask = RegMask;
2269 Op->StartLoc = S;
2270 Op->EndLoc = E;
2271 return Op;
2272 }
2273
2274 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2275 const unsigned ElementWidth) {
2276 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2277 RegMap = {
2278 {{0, AArch64::ZAB0},
2279 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2280 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2281 {{8, AArch64::ZAB0},
2282 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2283 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2284 {{16, AArch64::ZAH0},
2285 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2286 {{16, AArch64::ZAH1},
2287 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2288 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2289 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2290 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2291 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2292 };
2293
2294 if (ElementWidth == 64)
2295 OutRegs.insert(Reg);
2296 else {
2297 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2298 assert(!Regs.empty() && "Invalid tile or element width!");
2299 for (auto OutReg : Regs)
2300 OutRegs.insert(OutReg);
2301 }
2302 }
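// For example, with ElementWidth == 32 the tile za1.s (ZAS1) expands to the
// 64-bit tiles ZAD1 and ZAD5 via the map above, since those rows of ZA
// overlap the 32-bit tile.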
2303
2304 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2305 SMLoc E, MCContext &Ctx) {
2306 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2307 Op->Imm.Val = Val;
2308 Op->StartLoc = S;
2309 Op->EndLoc = E;
2310 return Op;
2311 }
2312
2313 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2314 unsigned ShiftAmount,
2315 SMLoc S, SMLoc E,
2316 MCContext &Ctx) {
2317 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2318 Op->ShiftedImm.Val = Val;
2319 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2320 Op->StartLoc = S;
2321 Op->EndLoc = E;
2322 return Op;
2323 }
2324
2325 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2326 unsigned Last, SMLoc S,
2327 SMLoc E,
2328 MCContext &Ctx) {
2329 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2330 Op->ImmRange.First = First;
2331 Op->ImmRange.Last = Last;
Op->StartLoc = S;
2332 Op->EndLoc = E;
2333 return Op;
2334 }
2335
2336 static std::unique_ptr<AArch64Operand>
2337 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2338 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2339 Op->CondCode.Code = Code;
2340 Op->StartLoc = S;
2341 Op->EndLoc = E;
2342 return Op;
2343 }
2344
2345 static std::unique_ptr<AArch64Operand>
2346 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2347 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2348 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2349 Op->FPImm.IsExact = IsExact;
2350 Op->StartLoc = S;
2351 Op->EndLoc = S;
2352 return Op;
2353 }
2354
2355 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2356 StringRef Str,
2357 SMLoc S,
2358 MCContext &Ctx,
2359 bool HasnXSModifier) {
2360 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2361 Op->Barrier.Val = Val;
2362 Op->Barrier.Data = Str.data();
2363 Op->Barrier.Length = Str.size();
2364 Op->Barrier.HasnXSModifier = HasnXSModifier;
2365 Op->StartLoc = S;
2366 Op->EndLoc = S;
2367 return Op;
2368 }
2369
2370 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2371 uint32_t MRSReg,
2372 uint32_t MSRReg,
2373 uint32_t PStateField,
2374 MCContext &Ctx) {
2375 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2376 Op->SysReg.Data = Str.data();
2377 Op->SysReg.Length = Str.size();
2378 Op->SysReg.MRSReg = MRSReg;
2379 Op->SysReg.MSRReg = MSRReg;
2380 Op->SysReg.PStateField = PStateField;
2381 Op->StartLoc = S;
2382 Op->EndLoc = S;
2383 return Op;
2384 }
2385
2386 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2387 SMLoc E, MCContext &Ctx) {
2388 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2389 Op->SysCRImm.Val = Val;
2390 Op->StartLoc = S;
2391 Op->EndLoc = E;
2392 return Op;
2393 }
2394
2395 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2396 StringRef Str,
2397 SMLoc S,
2398 MCContext &Ctx) {
2399 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2400 Op->Prefetch.Val = Val;
2401 Op->Prefetch.Data = Str.data();
2402 Op->Prefetch.Length = Str.size();
2403 Op->StartLoc = S;
2404 Op->EndLoc = S;
2405 return Op;
2406 }
2407
2408 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2409 StringRef Str,
2410 SMLoc S,
2411 MCContext &Ctx) {
2412 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2413 Op->PSBHint.Val = Val;
2414 Op->PSBHint.Data = Str.data();
2415 Op->PSBHint.Length = Str.size();
2416 Op->StartLoc = S;
2417 Op->EndLoc = S;
2418 return Op;
2419 }
2420
2421 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2422 StringRef Str,
2423 SMLoc S,
2424 MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2426 Op->BTIHint.Val = Val | 32;
2427 Op->BTIHint.Data = Str.data();
2428 Op->BTIHint.Length = Str.size();
2429 Op->StartLoc = S;
2430 Op->EndLoc = S;
2431 return Op;
2432 }
2433
2434 static std::unique_ptr<AArch64Operand>
2435 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2436 SMLoc S, SMLoc E, MCContext &Ctx) {
2437 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2438 Op->MatrixReg.RegNum = RegNum;
2439 Op->MatrixReg.ElementWidth = ElementWidth;
2440 Op->MatrixReg.Kind = Kind;
2441 Op->StartLoc = S;
2442 Op->EndLoc = E;
2443 return Op;
2444 }
2445
2446 static std::unique_ptr<AArch64Operand>
2447 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2448 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2449 Op->SVCR.PStateField = PStateField;
2450 Op->SVCR.Data = Str.data();
2451 Op->SVCR.Length = Str.size();
2452 Op->StartLoc = S;
2453 Op->EndLoc = S;
2454 return Op;
2455 }
2456
2457 static std::unique_ptr<AArch64Operand>
2458 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2459 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2460 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2461 Op->ShiftExtend.Type = ShOp;
2462 Op->ShiftExtend.Amount = Val;
2463 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2464 Op->StartLoc = S;
2465 Op->EndLoc = E;
2466 return Op;
2467 }
2468};
2469
2470} // end anonymous namespace.
2471
2472void AArch64Operand::print(raw_ostream &OS) const {
2473 switch (Kind) {
2474 case k_FPImm:
2475 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2476 if (!getFPImmIsExact())
2477 OS << " (inexact)";
2478 OS << ">";
2479 break;
2480 case k_Barrier: {
2481 StringRef Name = getBarrierName();
2482 if (!Name.empty())
2483 OS << "<barrier " << Name << ">";
2484 else
2485 OS << "<barrier invalid #" << getBarrier() << ">";
2486 break;
2487 }
2488 case k_Immediate:
2489 OS << *getImm();
2490 break;
2491 case k_ShiftedImm: {
2492 unsigned Shift = getShiftedImmShift();
2493 OS << "<shiftedimm ";
2494 OS << *getShiftedImmVal();
2495 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2496 break;
2497 }
2498 case k_ImmRange: {
2499 OS << "<immrange ";
2500 OS << getFirstImmVal();
2501 OS << ":" << getLastImmVal() << ">";
2502 break;
2503 }
2504 case k_CondCode:
2505 OS << "<condcode " << getCondCode() << ">";
2506 break;
2507 case k_VectorList: {
2508 OS << "<vectorlist ";
2509 unsigned Reg = getVectorListStart();
2510 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2511 OS << Reg + i * getVectorListStride() << " ";
2512 OS << ">";
2513 break;
2514 }
2515 case k_VectorIndex:
2516 OS << "<vectorindex " << getVectorIndex() << ">";
2517 break;
2518 case k_SysReg:
2519 OS << "<sysreg: " << getSysReg() << '>';
2520 break;
2521 case k_Token:
2522 OS << "'" << getToken() << "'";
2523 break;
2524 case k_SysCR:
2525 OS << "c" << getSysCR();
2526 break;
2527 case k_Prefetch: {
2528 StringRef Name = getPrefetchName();
2529 if (!Name.empty())
2530 OS << "<prfop " << Name << ">";
2531 else
2532 OS << "<prfop invalid #" << getPrefetch() << ">";
2533 break;
2534 }
2535 case k_PSBHint:
2536 OS << getPSBHintName();
2537 break;
2538 case k_BTIHint:
2539 OS << getBTIHintName();
2540 break;
2541 case k_MatrixRegister:
2542 OS << "<matrix " << getMatrixReg() << ">";
2543 break;
2544 case k_MatrixTileList: {
2545 OS << "<matrixlist ";
2546 unsigned RegMask = getMatrixTileListRegMask();
2547 unsigned MaxBits = 8;
2548 for (unsigned I = MaxBits; I > 0; --I)
2549 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2550 OS << '>';
2551 break;
2552 }
2553 case k_SVCR: {
2554 OS << getSVCR();
2555 break;
2556 }
2557 case k_Register:
2558 OS << "<register " << getReg() << ">";
2559 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2560 break;
2561 [[fallthrough]];
2562 case k_ShiftExtend:
2563 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2564 << getShiftExtendAmount();
2565 if (!hasShiftExtendAmount())
2566 OS << "<imp>";
2567 OS << '>';
2568 break;
2569 }
2570}
2571
2572/// @name Auto-generated Match Functions
2573/// {
2574
2575 static unsigned MatchRegisterName(StringRef Name);
2576
2577/// }
2578
2579 static unsigned MatchNeonVectorRegName(StringRef Name) {
2580 return StringSwitch<unsigned>(Name.lower())
2581 .Case("v0", AArch64::Q0)
2582 .Case("v1", AArch64::Q1)
2583 .Case("v2", AArch64::Q2)
2584 .Case("v3", AArch64::Q3)
2585 .Case("v4", AArch64::Q4)
2586 .Case("v5", AArch64::Q5)
2587 .Case("v6", AArch64::Q6)
2588 .Case("v7", AArch64::Q7)
2589 .Case("v8", AArch64::Q8)
2590 .Case("v9", AArch64::Q9)
2591 .Case("v10", AArch64::Q10)
2592 .Case("v11", AArch64::Q11)
2593 .Case("v12", AArch64::Q12)
2594 .Case("v13", AArch64::Q13)
2595 .Case("v14", AArch64::Q14)
2596 .Case("v15", AArch64::Q15)
2597 .Case("v16", AArch64::Q16)
2598 .Case("v17", AArch64::Q17)
2599 .Case("v18", AArch64::Q18)
2600 .Case("v19", AArch64::Q19)
2601 .Case("v20", AArch64::Q20)
2602 .Case("v21", AArch64::Q21)
2603 .Case("v22", AArch64::Q22)
2604 .Case("v23", AArch64::Q23)
2605 .Case("v24", AArch64::Q24)
2606 .Case("v25", AArch64::Q25)
2607 .Case("v26", AArch64::Q26)
2608 .Case("v27", AArch64::Q27)
2609 .Case("v28", AArch64::Q28)
2610 .Case("v29", AArch64::Q29)
2611 .Case("v30", AArch64::Q30)
2612 .Case("v31", AArch64::Q31)
2613 .Default(0);
2614}
2615
2616/// Returns an optional pair of (#elements, element-width) if Suffix
2617/// is a valid vector kind. Where the number of elements in a vector
2618/// or the vector width is implicit or explicitly unknown (but still a
2619/// valid suffix kind), 0 is used.
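/// For example, ".4s" yields {4, 32} for a NEON vector, while a width-only
/// SVE suffix such as ".s" yields {0, 32}.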
2620static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2621 RegKind VectorKind) {
2622 std::pair<int, int> Res = {-1, -1};
2623
2624 switch (VectorKind) {
2625 case RegKind::NeonVector:
2626 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2627 .Case("", {0, 0})
2628 .Case(".1d", {1, 64})
2629 .Case(".1q", {1, 128})
2630 // '.2h' needed for fp16 scalar pairwise reductions
2631 .Case(".2h", {2, 16})
2632 .Case(".2b", {2, 8})
2633 .Case(".2s", {2, 32})
2634 .Case(".2d", {2, 64})
2635 // '.4b' is another special case for the ARMv8.2a dot product
2636 // operand
2637 .Case(".4b", {4, 8})
2638 .Case(".4h", {4, 16})
2639 .Case(".4s", {4, 32})
2640 .Case(".8b", {8, 8})
2641 .Case(".8h", {8, 16})
2642 .Case(".16b", {16, 8})
2643 // Accept the width neutral ones, too, for verbose syntax. If
2644 // those aren't used in the right places, the token operand won't
2645 // match so all will work out.
2646 .Case(".b", {0, 8})
2647 .Case(".h", {0, 16})
2648 .Case(".s", {0, 32})
2649 .Case(".d", {0, 64})
2650 .Default({-1, -1});
2651 break;
2652 case RegKind::SVEPredicateAsCounter:
2653 case RegKind::SVEPredicateVector:
2654 case RegKind::SVEDataVector:
2655 case RegKind::Matrix:
2656 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2657 .Case("", {0, 0})
2658 .Case(".b", {0, 8})
2659 .Case(".h", {0, 16})
2660 .Case(".s", {0, 32})
2661 .Case(".d", {0, 64})
2662 .Case(".q", {0, 128})
2663 .Default({-1, -1});
2664 break;
2665 default:
2666 llvm_unreachable("Unsupported RegKind");
2667 }
2668
2669 if (Res == std::make_pair(-1, -1))
2670 return std::nullopt;
2671
2672 return std::optional<std::pair<int, int>>(Res);
2673}
2674
2675static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2676 return parseVectorKind(Suffix, VectorKind).has_value();
2677}
2678
2679 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2680 return StringSwitch<unsigned>(Name.lower())
2681 .Case("z0", AArch64::Z0)
2682 .Case("z1", AArch64::Z1)
2683 .Case("z2", AArch64::Z2)
2684 .Case("z3", AArch64::Z3)
2685 .Case("z4", AArch64::Z4)
2686 .Case("z5", AArch64::Z5)
2687 .Case("z6", AArch64::Z6)
2688 .Case("z7", AArch64::Z7)
2689 .Case("z8", AArch64::Z8)
2690 .Case("z9", AArch64::Z9)
2691 .Case("z10", AArch64::Z10)
2692 .Case("z11", AArch64::Z11)
2693 .Case("z12", AArch64::Z12)
2694 .Case("z13", AArch64::Z13)
2695 .Case("z14", AArch64::Z14)
2696 .Case("z15", AArch64::Z15)
2697 .Case("z16", AArch64::Z16)
2698 .Case("z17", AArch64::Z17)
2699 .Case("z18", AArch64::Z18)
2700 .Case("z19", AArch64::Z19)
2701 .Case("z20", AArch64::Z20)
2702 .Case("z21", AArch64::Z21)
2703 .Case("z22", AArch64::Z22)
2704 .Case("z23", AArch64::Z23)
2705 .Case("z24", AArch64::Z24)
2706 .Case("z25", AArch64::Z25)
2707 .Case("z26", AArch64::Z26)
2708 .Case("z27", AArch64::Z27)
2709 .Case("z28", AArch64::Z28)
2710 .Case("z29", AArch64::Z29)
2711 .Case("z30", AArch64::Z30)
2712 .Case("z31", AArch64::Z31)
2713 .Default(0);
2714}
2715
2716 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2717 return StringSwitch<unsigned>(Name.lower())
2718 .Case("p0", AArch64::P0)
2719 .Case("p1", AArch64::P1)
2720 .Case("p2", AArch64::P2)
2721 .Case("p3", AArch64::P3)
2722 .Case("p4", AArch64::P4)
2723 .Case("p5", AArch64::P5)
2724 .Case("p6", AArch64::P6)
2725 .Case("p7", AArch64::P7)
2726 .Case("p8", AArch64::P8)
2727 .Case("p9", AArch64::P9)
2728 .Case("p10", AArch64::P10)
2729 .Case("p11", AArch64::P11)
2730 .Case("p12", AArch64::P12)
2731 .Case("p13", AArch64::P13)
2732 .Case("p14", AArch64::P14)
2733 .Case("p15", AArch64::P15)
2734 .Default(0);
2735}
2736
2737 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2738 return StringSwitch<unsigned>(Name.lower())
2739 .Case("pn0", AArch64::PN0)
2740 .Case("pn1", AArch64::PN1)
2741 .Case("pn2", AArch64::PN2)
2742 .Case("pn3", AArch64::PN3)
2743 .Case("pn4", AArch64::PN4)
2744 .Case("pn5", AArch64::PN5)
2745 .Case("pn6", AArch64::PN6)
2746 .Case("pn7", AArch64::PN7)
2747 .Case("pn8", AArch64::PN8)
2748 .Case("pn9", AArch64::PN9)
2749 .Case("pn10", AArch64::PN10)
2750 .Case("pn11", AArch64::PN11)
2751 .Case("pn12", AArch64::PN12)
2752 .Case("pn13", AArch64::PN13)
2753 .Case("pn14", AArch64::PN14)
2754 .Case("pn15", AArch64::PN15)
2755 .Default(0);
2756}
2757
2758 static unsigned matchMatrixTileListRegName(StringRef Name) {
2759 return StringSwitch<unsigned>(Name.lower())
2760 .Case("za0.d", AArch64::ZAD0)
2761 .Case("za1.d", AArch64::ZAD1)
2762 .Case("za2.d", AArch64::ZAD2)
2763 .Case("za3.d", AArch64::ZAD3)
2764 .Case("za4.d", AArch64::ZAD4)
2765 .Case("za5.d", AArch64::ZAD5)
2766 .Case("za6.d", AArch64::ZAD6)
2767 .Case("za7.d", AArch64::ZAD7)
2768 .Case("za0.s", AArch64::ZAS0)
2769 .Case("za1.s", AArch64::ZAS1)
2770 .Case("za2.s", AArch64::ZAS2)
2771 .Case("za3.s", AArch64::ZAS3)
2772 .Case("za0.h", AArch64::ZAH0)
2773 .Case("za1.h", AArch64::ZAH1)
2774 .Case("za0.b", AArch64::ZAB0)
2775 .Default(0);
2776}
2777
2778 static unsigned matchMatrixRegName(StringRef Name) {
2779 return StringSwitch<unsigned>(Name.lower())
2780 .Case("za", AArch64::ZA)
2781 .Case("za0.q", AArch64::ZAQ0)
2782 .Case("za1.q", AArch64::ZAQ1)
2783 .Case("za2.q", AArch64::ZAQ2)
2784 .Case("za3.q", AArch64::ZAQ3)
2785 .Case("za4.q", AArch64::ZAQ4)
2786 .Case("za5.q", AArch64::ZAQ5)
2787 .Case("za6.q", AArch64::ZAQ6)
2788 .Case("za7.q", AArch64::ZAQ7)
2789 .Case("za8.q", AArch64::ZAQ8)
2790 .Case("za9.q", AArch64::ZAQ9)
2791 .Case("za10.q", AArch64::ZAQ10)
2792 .Case("za11.q", AArch64::ZAQ11)
2793 .Case("za12.q", AArch64::ZAQ12)
2794 .Case("za13.q", AArch64::ZAQ13)
2795 .Case("za14.q", AArch64::ZAQ14)
2796 .Case("za15.q", AArch64::ZAQ15)
2797 .Case("za0.d", AArch64::ZAD0)
2798 .Case("za1.d", AArch64::ZAD1)
2799 .Case("za2.d", AArch64::ZAD2)
2800 .Case("za3.d", AArch64::ZAD3)
2801 .Case("za4.d", AArch64::ZAD4)
2802 .Case("za5.d", AArch64::ZAD5)
2803 .Case("za6.d", AArch64::ZAD6)
2804 .Case("za7.d", AArch64::ZAD7)
2805 .Case("za0.s", AArch64::ZAS0)
2806 .Case("za1.s", AArch64::ZAS1)
2807 .Case("za2.s", AArch64::ZAS2)
2808 .Case("za3.s", AArch64::ZAS3)
2809 .Case("za0.h", AArch64::ZAH0)
2810 .Case("za1.h", AArch64::ZAH1)
2811 .Case("za0.b", AArch64::ZAB0)
2812 .Case("za0h.q", AArch64::ZAQ0)
2813 .Case("za1h.q", AArch64::ZAQ1)
2814 .Case("za2h.q", AArch64::ZAQ2)
2815 .Case("za3h.q", AArch64::ZAQ3)
2816 .Case("za4h.q", AArch64::ZAQ4)
2817 .Case("za5h.q", AArch64::ZAQ5)
2818 .Case("za6h.q", AArch64::ZAQ6)
2819 .Case("za7h.q", AArch64::ZAQ7)
2820 .Case("za8h.q", AArch64::ZAQ8)
2821 .Case("za9h.q", AArch64::ZAQ9)
2822 .Case("za10h.q", AArch64::ZAQ10)
2823 .Case("za11h.q", AArch64::ZAQ11)
2824 .Case("za12h.q", AArch64::ZAQ12)
2825 .Case("za13h.q", AArch64::ZAQ13)
2826 .Case("za14h.q", AArch64::ZAQ14)
2827 .Case("za15h.q", AArch64::ZAQ15)
2828 .Case("za0h.d", AArch64::ZAD0)
2829 .Case("za1h.d", AArch64::ZAD1)
2830 .Case("za2h.d", AArch64::ZAD2)
2831 .Case("za3h.d", AArch64::ZAD3)
2832 .Case("za4h.d", AArch64::ZAD4)
2833 .Case("za5h.d", AArch64::ZAD5)
2834 .Case("za6h.d", AArch64::ZAD6)
2835 .Case("za7h.d", AArch64::ZAD7)
2836 .Case("za0h.s", AArch64::ZAS0)
2837 .Case("za1h.s", AArch64::ZAS1)
2838 .Case("za2h.s", AArch64::ZAS2)
2839 .Case("za3h.s", AArch64::ZAS3)
2840 .Case("za0h.h", AArch64::ZAH0)
2841 .Case("za1h.h", AArch64::ZAH1)
2842 .Case("za0h.b", AArch64::ZAB0)
2843 .Case("za0v.q", AArch64::ZAQ0)
2844 .Case("za1v.q", AArch64::ZAQ1)
2845 .Case("za2v.q", AArch64::ZAQ2)
2846 .Case("za3v.q", AArch64::ZAQ3)
2847 .Case("za4v.q", AArch64::ZAQ4)
2848 .Case("za5v.q", AArch64::ZAQ5)
2849 .Case("za6v.q", AArch64::ZAQ6)
2850 .Case("za7v.q", AArch64::ZAQ7)
2851 .Case("za8v.q", AArch64::ZAQ8)
2852 .Case("za9v.q", AArch64::ZAQ9)
2853 .Case("za10v.q", AArch64::ZAQ10)
2854 .Case("za11v.q", AArch64::ZAQ11)
2855 .Case("za12v.q", AArch64::ZAQ12)
2856 .Case("za13v.q", AArch64::ZAQ13)
2857 .Case("za14v.q", AArch64::ZAQ14)
2858 .Case("za15v.q", AArch64::ZAQ15)
2859 .Case("za0v.d", AArch64::ZAD0)
2860 .Case("za1v.d", AArch64::ZAD1)
2861 .Case("za2v.d", AArch64::ZAD2)
2862 .Case("za3v.d", AArch64::ZAD3)
2863 .Case("za4v.d", AArch64::ZAD4)
2864 .Case("za5v.d", AArch64::ZAD5)
2865 .Case("za6v.d", AArch64::ZAD6)
2866 .Case("za7v.d", AArch64::ZAD7)
2867 .Case("za0v.s", AArch64::ZAS0)
2868 .Case("za1v.s", AArch64::ZAS1)
2869 .Case("za2v.s", AArch64::ZAS2)
2870 .Case("za3v.s", AArch64::ZAS3)
2871 .Case("za0v.h", AArch64::ZAH0)
2872 .Case("za1v.h", AArch64::ZAH1)
2873 .Case("za0v.b", AArch64::ZAB0)
2874 .Default(0);
2875}
2876
2877bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2878 SMLoc &EndLoc) {
2879 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2880}
2881
2882ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2883 SMLoc &EndLoc) {
2884 StartLoc = getLoc();
2885 ParseStatus Res = tryParseScalarRegister(Reg);
2886 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2887 return Res;
2888}
2889
2890// Matches a register name or register alias previously defined by '.req'
2891unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2892 RegKind Kind) {
2893 unsigned RegNum = 0;
2894 if ((RegNum = matchSVEDataVectorRegName(Name)))
2895 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2896
2897 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2898 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2899
2900 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2901 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2902
2903 if ((RegNum = MatchNeonVectorRegName(Name)))
2904 return Kind == RegKind::NeonVector ? RegNum : 0;
2905
2906 if ((RegNum = matchMatrixRegName(Name)))
2907 return Kind == RegKind::Matrix ? RegNum : 0;
2908
2909 if (Name.equals_insensitive("zt0"))
2910 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2911
2912 // The parsed register must be of RegKind Scalar
2913 if ((RegNum = MatchRegisterName(Name)))
2914 return (Kind == RegKind::Scalar) ? RegNum : 0;
2915
2916 if (!RegNum) {
2917 // Handle a few common aliases of registers.
2918 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2919 .Case("fp", AArch64::FP)
2920 .Case("lr", AArch64::LR)
2921 .Case("x31", AArch64::XZR)
2922 .Case("w31", AArch64::WZR)
2923 .Default(0))
2924 return Kind == RegKind::Scalar ? RegNum : 0;
2925
2926 // Check for aliases registered via .req. Canonicalize to lower case.
2927 // That's more consistent since register names are case insensitive, and
2928 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2929 auto Entry = RegisterReqs.find(Name.lower());
2930 if (Entry == RegisterReqs.end())
2931 return 0;
2932
2933 // set RegNum if the match is the right kind of register
2934 if (Kind == Entry->getValue().first)
2935 RegNum = Entry->getValue().second;
2936 }
2937 return RegNum;
2938}
2939
2940unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2941 switch (K) {
2942 case RegKind::Scalar:
2943 case RegKind::NeonVector:
2944 case RegKind::SVEDataVector:
2945 return 32;
2946 case RegKind::Matrix:
2947 case RegKind::SVEPredicateVector:
2948 case RegKind::SVEPredicateAsCounter:
2949 return 16;
2950 case RegKind::LookupTable:
2951 return 1;
2952 }
2953 llvm_unreachable("Unsupported RegKind");
2954}
2955
2956/// tryParseScalarRegister - Try to parse a register name. The token must be an
2957/// Identifier when called, and if it is a register name the token is eaten and
2958/// the register is added to the operand list.
2959ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2960 const AsmToken &Tok = getTok();
2961 if (Tok.isNot(AsmToken::Identifier))
2962 return ParseStatus::NoMatch;
2963
2964 std::string lowerCase = Tok.getString().lower();
2965 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2966 if (Reg == 0)
2967 return ParseStatus::NoMatch;
2968
2969 RegNum = Reg;
2970 Lex(); // Eat identifier token.
2971 return ParseStatus::Success;
2972}
2973
2974/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2975ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2976 SMLoc S = getLoc();
2977
2978 if (getTok().isNot(AsmToken::Identifier))
2979 return Error(S, "Expected cN operand where 0 <= N <= 15");
2980
2981 StringRef Tok = getTok().getIdentifier();
2982 if (Tok[0] != 'c' && Tok[0] != 'C')
2983 return Error(S, "Expected cN operand where 0 <= N <= 15");
2984
2985 uint32_t CRNum;
2986 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2987 if (BadNum || CRNum > 15)
2988 return Error(S, "Expected cN operand where 0 <= N <= 15");
2989
2990 Lex(); // Eat identifier token.
2991 Operands.push_back(
2992 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2993 return ParseStatus::Success;
2994}
2995
2996// Either an identifier for named values or a 6-bit immediate.
2997ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
2998 SMLoc S = getLoc();
2999 const AsmToken &Tok = getTok();
3000
3001 unsigned MaxVal = 63;
3002
3003 // Immediate case, with optional leading hash:
3004 if (parseOptionalToken(AsmToken::Hash) ||
3005 Tok.is(AsmToken::Integer)) {
3006 const MCExpr *ImmVal;
3007 if (getParser().parseExpression(ImmVal))
3008 return ParseStatus::Failure;
3009
3010 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3011 if (!MCE)
3012 return TokError("immediate value expected for prefetch operand");
3013 unsigned prfop = MCE->getValue();
3014 if (prfop > MaxVal)
3015 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3016 "] expected");
3017
3018 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3019 Operands.push_back(AArch64Operand::CreatePrefetch(
3020 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3021 return ParseStatus::Success;
3022 }
3023
3024 if (Tok.isNot(AsmToken::Identifier))
3025 return TokError("prefetch hint expected");
3026
3027 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3028 if (!RPRFM)
3029 return TokError("prefetch hint expected");
3030
3031 Operands.push_back(AArch64Operand::CreatePrefetch(
3032 RPRFM->Encoding, Tok.getString(), S, getContext()));
3033 Lex(); // Eat identifier token.
3034 return ParseStatus::Success;
3035}
3036
3037/// tryParsePrefetch - Try to parse a prefetch operand.
3038template <bool IsSVEPrefetch>
3039ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3040 SMLoc S = getLoc();
3041 const AsmToken &Tok = getTok();
3042
3043 auto LookupByName = [](StringRef N) {
3044 if (IsSVEPrefetch) {
3045 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3046 return std::optional<unsigned>(Res->Encoding);
3047 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3048 return std::optional<unsigned>(Res->Encoding);
3049 return std::optional<unsigned>();
3050 };
3051
3052 auto LookupByEncoding = [](unsigned E) {
3053 if (IsSVEPrefetch) {
3054 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3055 return std::optional<StringRef>(Res->Name);
3056 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3057 return std::optional<StringRef>(Res->Name);
3058 return std::optional<StringRef>();
3059 };
3060 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3061
3062 // Either an identifier for named values or a 5-bit immediate.
3063 // Eat optional hash.
3064 if (parseOptionalToken(AsmToken::Hash) ||
3065 Tok.is(AsmToken::Integer)) {
3066 const MCExpr *ImmVal;
3067 if (getParser().parseExpression(ImmVal))
3068 return ParseStatus::Failure;
3069
3070 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3071 if (!MCE)
3072 return TokError("immediate value expected for prefetch operand");
3073 unsigned prfop = MCE->getValue();
3074 if (prfop > MaxVal)
3075 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3076 "] expected");
3077
3078 auto PRFM = LookupByEncoding(MCE->getValue());
3079 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3080 S, getContext()));
3081 return ParseStatus::Success;
3082 }
3083
3084 if (Tok.isNot(AsmToken::Identifier))
3085 return TokError("prefetch hint expected");
3086
3087 auto PRFM = LookupByName(Tok.getString());
3088 if (!PRFM)
3089 return TokError("prefetch hint expected");
3090
3091 Operands.push_back(AArch64Operand::CreatePrefetch(
3092 *PRFM, Tok.getString(), S, getContext()));
3093 Lex(); // Eat identifier token.
3094 return ParseStatus::Success;
3095}
3096
3097/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3098ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3099 SMLoc S = getLoc();
3100 const AsmToken &Tok = getTok();
3101 if (Tok.isNot(AsmToken::Identifier))
3102 return TokError("invalid operand for instruction");
3103
3104 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3105 if (!PSB)
3106 return TokError("invalid operand for instruction");
3107
3108 Operands.push_back(AArch64Operand::CreatePSBHint(
3109 PSB->Encoding, Tok.getString(), S, getContext()));
3110 Lex(); // Eat identifier token.
3111 return ParseStatus::Success;
3112}
3113
3114ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3115 SMLoc StartLoc = getLoc();
3116
3117 MCRegister RegNum;
3118
3119 // The case where xzr, xzr is not present is handled by an InstAlias.
3120
3121 auto RegTok = getTok(); // in case we need to backtrack
3122 if (!tryParseScalarRegister(RegNum).isSuccess())
3123 return ParseStatus::NoMatch;
3124
3125 if (RegNum != AArch64::XZR) {
3126 getLexer().UnLex(RegTok);
3127 return ParseStatus::NoMatch;
3128 }
3129
3130 if (parseComma())
3131 return ParseStatus::Failure;
3132
3133 if (!tryParseScalarRegister(RegNum).isSuccess())
3134 return TokError("expected register operand");
3135
3136 if (RegNum != AArch64::XZR)
3137 return TokError("xzr must be followed by xzr");
3138
3139 // We need to push something, since we claim this is an operand in .td.
3140 // See also AArch64AsmParser::parseKeywordOperand.
3141 Operands.push_back(AArch64Operand::CreateReg(
3142 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3143
3144 return ParseStatus::Success;
3145}
3146
3147/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3148ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3149 SMLoc S = getLoc();
3150 const AsmToken &Tok = getTok();
3151 if (Tok.isNot(AsmToken::Identifier))
3152 return TokError("invalid operand for instruction");
3153
3154 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3155 if (!BTI)
3156 return TokError("invalid operand for instruction");
3157
3158 Operands.push_back(AArch64Operand::CreateBTIHint(
3159 BTI->Encoding, Tok.getString(), S, getContext()));
3160 Lex(); // Eat identifier token.
3161 return ParseStatus::Success;
3162}
3163
3164/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3165/// instruction.
3166ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3167 SMLoc S = getLoc();
3168 const MCExpr *Expr = nullptr;
3169
3170 if (getTok().is(AsmToken::Hash)) {
3171 Lex(); // Eat hash token.
3172 }
3173
3174 if (parseSymbolicImmVal(Expr))
3175 return ParseStatus::Failure;
3176
3177 AArch64MCExpr::VariantKind ELFRefKind;
3178 MCSymbolRefExpr::VariantKind DarwinRefKind;
3179 int64_t Addend;
3180 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3181 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3182 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3183 // No modifier was specified at all; this is the syntax for an ELF basic
3184 // ADRP relocation (unfortunately).
3185 Expr =
3186 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3187 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3188 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3189 Addend != 0) {
3190 return Error(S, "gotpage label reference not allowed an addend");
3191 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3192 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3193 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3194 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3195 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3196 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3197 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3198 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3199 // The operand must be an @page or @gotpage qualified symbolref.
3200 return Error(S, "page or gotpage label reference expected");
3201 }
3202 }
3203
3204 // We have either a label reference, possibly with an addend, or an immediate.
3205 // The addend is a raw value here; the linker will adjust it to reference only
3206 // the page.
3207 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3208 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3209
3210 return ParseStatus::Success;
3211}
3212
3213/// tryParseAdrLabel - Parse and validate a source label for the ADR
3214/// instruction.
3215ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3216 SMLoc S = getLoc();
3217 const MCExpr *Expr = nullptr;
3218
3219 // Leave anything with a bracket to the default for SVE
3220 if (getTok().is(AsmToken::LBrac))
3221 return ParseStatus::NoMatch;
3222
3223 if (getTok().is(AsmToken::Hash))
3224 Lex(); // Eat hash token.
3225
3226 if (parseSymbolicImmVal(Expr))
3227 return ParseStatus::Failure;
3228
3229 AArch64MCExpr::VariantKind ELFRefKind;
3230 MCSymbolRefExpr::VariantKind DarwinRefKind;
3231 int64_t Addend;
3232 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3233 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3234 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3235 // No modifier was specified at all; this is the syntax for an ELF basic
3236 // ADR relocation (unfortunately).
3237 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3238 } else {
3239 return Error(S, "unexpected adr label");
3240 }
3241 }
3242
3243 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3244 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3245 return ParseStatus::Success;
3246}
3247
3248/// tryParseFPImm - A floating point immediate expression operand.
3249template <bool AddFPZeroAsLiteral>
3250ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3251 SMLoc S = getLoc();
3252
3253 bool Hash = parseOptionalToken(AsmToken::Hash);
3254
3255 // Handle negation, as that still comes through as a separate token.
3256 bool isNegative = parseOptionalToken(AsmToken::Minus);
3257
3258 const AsmToken &Tok = getTok();
3259 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3260 if (!Hash)
3261 return ParseStatus::NoMatch;
3262 return TokError("invalid floating point immediate");
3263 }
3264
3265 // Parse hexadecimal representation.
3266 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
3267 if (Tok.getIntVal() > 255 || isNegative)
3268 return TokError("encoded floating point value out of range");
3269
3271 Operands.push_back(
3272 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3273 } else {
3274 // Parse FP representation.
3275 APFloat RealVal(APFloat::IEEEdouble());
3276 auto StatusOrErr =
3277 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3278 if (errorToBool(StatusOrErr.takeError()))
3279 return TokError("invalid floating point representation");
3280
3281 if (isNegative)
3282 RealVal.changeSign();
3283
3284 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3285 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3286 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3287 } else
3288 Operands.push_back(AArch64Operand::CreateFPImm(
3289 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3290 }
3291
3292 Lex(); // Eat the token.
3293
3294 return ParseStatus::Success;
3295}
3296
3297/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3298/// a shift suffix, for example '#1, lsl #12'.
3299 ParseStatus
3300 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3301 SMLoc S = getLoc();
3302
3303 if (getTok().is(AsmToken::Hash))
3304 Lex(); // Eat '#'
3305 else if (getTok().isNot(AsmToken::Integer))
3306 // Operand should start from # or should be integer, emit error otherwise.
3307 return ParseStatus::NoMatch;
3308
3309 if (getTok().is(AsmToken::Integer) &&
3310 getLexer().peekTok().is(AsmToken::Colon))
3311 return tryParseImmRange(Operands);
3312
3313 const MCExpr *Imm = nullptr;
3314 if (parseSymbolicImmVal(Imm))
3315 return ParseStatus::Failure;
3316 else if (getTok().isNot(AsmToken::Comma)) {
3317 Operands.push_back(
3318 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3319 return ParseStatus::Success;
3320 }
3321
3322 // Eat ','
3323 Lex();
3324 StringRef VecGroup;
3325 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3326 Operands.push_back(
3327 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3328 Operands.push_back(
3329 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3330 return ParseStatus::Success;
3331 }
3332
3333 // The optional operand must be "lsl #N" where N is non-negative.
3334 if (!getTok().is(AsmToken::Identifier) ||
3335 !getTok().getIdentifier().equals_insensitive("lsl"))
3336 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3337
3338 // Eat 'lsl'
3339 Lex();
3340
3341 parseOptionalToken(AsmToken::Hash);
3342
3343 if (getTok().isNot(AsmToken::Integer))
3344 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3345
3346 int64_t ShiftAmount = getTok().getIntVal();
3347
3348 if (ShiftAmount < 0)
3349 return Error(getLoc(), "positive shift amount required");
3350 Lex(); // Eat the number
3351
3352 // Just in case the optional lsl #0 is used for immediates other than zero.
3353 if (ShiftAmount == 0 && Imm != nullptr) {
3354 Operands.push_back(
3355 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3356 return ParseStatus::Success;
3357 }
3358
3359 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3360 getLoc(), getContext()));
3361 return ParseStatus::Success;
3362}
3363
3364/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3365 /// suggestion to help with common typos.
3366 AArch64CC::CondCode
3367 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3369 .Case("eq", AArch64CC::EQ)
3370 .Case("ne", AArch64CC::NE)
3371 .Case("cs", AArch64CC::HS)
3372 .Case("hs", AArch64CC::HS)
3373 .Case("cc", AArch64CC::LO)
3374 .Case("lo", AArch64CC::LO)
3375 .Case("mi", AArch64CC::MI)
3376 .Case("pl", AArch64CC::PL)
3377 .Case("vs", AArch64CC::VS)
3378 .Case("vc", AArch64CC::VC)
3379 .Case("hi", AArch64CC::HI)
3380 .Case("ls", AArch64CC::LS)
3381 .Case("ge", AArch64CC::GE)
3382 .Case("lt", AArch64CC::LT)
3383 .Case("gt", AArch64CC::GT)
3384 .Case("le", AArch64CC::LE)
3385 .Case("al", AArch64CC::AL)
3386 .Case("nv", AArch64CC::NV)
3387 .Default(AArch64CC::Invalid);
3388
3389 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3391 .Case("none", AArch64CC::EQ)
3392 .Case("any", AArch64CC::NE)
3393 .Case("nlast", AArch64CC::HS)
3394 .Case("last", AArch64CC::LO)
3395 .Case("first", AArch64CC::MI)
3396 .Case("nfrst", AArch64CC::PL)
3397 .Case("pmore", AArch64CC::HI)
3398 .Case("plast", AArch64CC::LS)
3399 .Case("tcont", AArch64CC::GE)
3400 .Case("tstop", AArch64CC::LT)
3401 .Default(AArch64CC::Invalid);
3402
3403 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3404 Suggestion = "nfrst";
3405 }
3406 return CC;
3407}
3408
3409/// parseCondCode - Parse a Condition Code operand.
3410bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3411 bool invertCondCode) {
3412 SMLoc S = getLoc();
3413 const AsmToken &Tok = getTok();
3414 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3415
3416 StringRef Cond = Tok.getString();
3417 std::string Suggestion;
3418 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3419 if (CC == AArch64CC::Invalid) {
3420 std::string Msg = "invalid condition code";
3421 if (!Suggestion.empty())
3422 Msg += ", did you mean " + Suggestion + "?";
3423 return TokError(Msg);
3424 }
3425 Lex(); // Eat identifier token.
3426
3427 if (invertCondCode) {
3428 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3429 return TokError("condition codes AL and NV are invalid for this instruction");
3430 CC = AArch64CC::getInvertedCondCode(CC);
3431 }
3432
3433 Operands.push_back(
3434 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3435 return false;
3436}
3437
3438ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3439 const AsmToken &Tok = getTok();
3440 SMLoc S = getLoc();
3441
3442 if (Tok.isNot(AsmToken::Identifier))
3443 return TokError("invalid operand for instruction");
3444
3445 unsigned PStateImm = -1;
3446 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3447 if (!SVCR)
3448 return ParseStatus::NoMatch;
3449 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3450 PStateImm = SVCR->Encoding;
3451
3452 Operands.push_back(
3453 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3454 Lex(); // Eat identifier token.
3455 return ParseStatus::Success;
3456}
3457
3458ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3459 const AsmToken &Tok = getTok();
3460 SMLoc S = getLoc();
3461
3462 StringRef Name = Tok.getString();
3463
3464 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3465 Lex(); // eat "za[.(b|h|s|d)]"
3466 unsigned ElementWidth = 0;
3467 auto DotPosition = Name.find('.');
3468 if (DotPosition != StringRef::npos) {
3469 const auto &KindRes =
3470 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3471 if (!KindRes)
3472 return TokError(
3473 "Expected the register to be followed by element width suffix");
3474 ElementWidth = KindRes->second;
3475 }
3476 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3477 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3478 getContext()));
3479 if (getLexer().is(AsmToken::LBrac)) {
3480 // There's no comma after matrix operand, so we can parse the next operand
3481 // immediately.
3482 if (parseOperand(Operands, false, false))
3483 return ParseStatus::NoMatch;
3484 }
3485 return ParseStatus::Success;
3486 }
3487
3488 // Try to parse matrix register.
3489 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3490 if (!Reg)
3491 return ParseStatus::NoMatch;
3492
3493 size_t DotPosition = Name.find('.');
3494 assert(DotPosition != StringRef::npos && "Unexpected register");
3495
3496 StringRef Head = Name.take_front(DotPosition);
3497 StringRef Tail = Name.drop_front(DotPosition);
3498 StringRef RowOrColumn = Head.take_back();
3499
3500 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3501 .Case("h", MatrixKind::Row)
3502 .Case("v", MatrixKind::Col)
3503 .Default(MatrixKind::Tile);
3504
3505 // Next up, parsing the suffix
3506 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3507 if (!KindRes)
3508 return TokError(
3509 "Expected the register to be followed by element width suffix");
3510 unsigned ElementWidth = KindRes->second;
3511
3512 Lex();
3513
3514 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3515 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3516
3517 if (getLexer().is(AsmToken::LBrac)) {
3518 // There's no comma after matrix operand, so we can parse the next operand
3519 // immediately.
3520 if (parseOperand(Operands, false, false))
3521 return ParseStatus::NoMatch;
3522 }
3523 return ParseStatus::Success;
3524}
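// A rough illustration of the SME naming handled above: "za" (optionally
// "za.b" etc.) is the whole array, a trailing 'h' or 'v' in the head (as in
// "za0h.s" or "za1v.d") selects a Row or Col slice, and anything else
// (e.g. "za3.d") is treated as a Tile.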
3525
3526 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
3527 /// extend argument. Parse them if present.
3528 ParseStatus
3529 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3530 const AsmToken &Tok = getTok();
3531 std::string LowerID = Tok.getString().lower();
3534 .Case("lsl", AArch64_AM::LSL)
3535 .Case("lsr", AArch64_AM::LSR)
3536 .Case("asr", AArch64_AM::ASR)
3537 .Case("ror", AArch64_AM::ROR)
3538 .Case("msl", AArch64_AM::MSL)
3539 .Case("uxtb", AArch64_AM::UXTB)
3540 .Case("uxth", AArch64_AM::UXTH)
3541 .Case("uxtw", AArch64_AM::UXTW)
3542 .Case("uxtx", AArch64_AM::UXTX)
3543 .Case("sxtb", AArch64_AM::SXTB)
3544 .Case("sxth", AArch64_AM::SXTH)
3545 .Case("sxtw", AArch64_AM::SXTW)
3546 .Case("sxtx", AArch64_AM::SXTX)
3548
3550 return ParseStatus::NoMatch;
3551
3552 SMLoc S = Tok.getLoc();
3553 Lex();
3554
3555 bool Hash = parseOptionalToken(AsmToken::Hash);
3556
3557 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3558 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3559 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3560 ShOp == AArch64_AM::MSL) {
3561 // We expect a number here.
3562 return TokError("expected #imm after shift specifier");
3563 }
3564
3565 // "extend" type operations don't need an immediate, #0 is implicit.
3566 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3567 Operands.push_back(
3568 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3569 return ParseStatus::Success;
3570 }
3571
3572 // Make sure we actually have a number, identifier or a parenthesized
3573 // expression.
3574 SMLoc E = getLoc();
3575 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3576 !getTok().is(AsmToken::Identifier))
3577 return Error(E, "expected integer shift amount");
3578
3579 const MCExpr *ImmVal;
3580 if (getParser().parseExpression(ImmVal))
3581 return ParseStatus::Failure;
3582
3583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3584 if (!MCE)
3585 return Error(E, "expected constant '#imm' after shift specifier");
3586
3587 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3588 Operands.push_back(AArch64Operand::CreateShiftExtend(
3589 ShOp, MCE->getValue(), true, S, E, getContext()));
3590 return ParseStatus::Success;
3591}
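// E.g. "add x0, x1, x2, lsl #4" supplies an explicit amount, whereas an
// extend written without one, such as "add x0, x1, w2, uxtw", falls into
// the implicit-#0 path above.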
3592
3593static const struct Extension {
3594 const char *Name;
3595 const FeatureBitset Features;
3596} ExtensionMap[] = {
3597 {"crc", {AArch64::FeatureCRC}},
3598 {"sm4", {AArch64::FeatureSM4}},
3599 {"sha3", {AArch64::FeatureSHA3}},
3600 {"sha2", {AArch64::FeatureSHA2}},
3601 {"aes", {AArch64::FeatureAES}},
3602 {"crypto", {AArch64::FeatureCrypto}},
3603 {"fp", {AArch64::FeatureFPARMv8}},
3604 {"simd", {AArch64::FeatureNEON}},
3605 {"ras", {AArch64::FeatureRAS}},
3606 {"rasv2", {AArch64::FeatureRASv2}},
3607 {"lse", {AArch64::FeatureLSE}},
3608 {"predres", {AArch64::FeaturePredRes}},
3609 {"predres2", {AArch64::FeatureSPECRES2}},
3610 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3611 {"mte", {AArch64::FeatureMTE}},
3612 {"memtag", {AArch64::FeatureMTE}},
3613 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3614 {"pan", {AArch64::FeaturePAN}},
3615 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3616 {"ccpp", {AArch64::FeatureCCPP}},
3617 {"rcpc", {AArch64::FeatureRCPC}},
3618 {"rng", {AArch64::FeatureRandGen}},
3619 {"sve", {AArch64::FeatureSVE}},
3620 {"sve2", {AArch64::FeatureSVE2}},
3621 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3622 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3623 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3624 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3625 {"sve2p1", {AArch64::FeatureSVE2p1}},
3626 {"b16b16", {AArch64::FeatureB16B16}},
3627 {"ls64", {AArch64::FeatureLS64}},
3628 {"xs", {AArch64::FeatureXS}},
3629 {"pauth", {AArch64::FeaturePAuth}},
3630 {"flagm", {AArch64::FeatureFlagM}},
3631 {"rme", {AArch64::FeatureRME}},
3632 {"sme", {AArch64::FeatureSME}},
3633 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3634 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3635 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3636 {"sme2", {AArch64::FeatureSME2}},
3637 {"sme2p1", {AArch64::FeatureSME2p1}},
3638 {"hbc", {AArch64::FeatureHBC}},
3639 {"mops", {AArch64::FeatureMOPS}},
3640 {"mec", {AArch64::FeatureMEC}},
3641 {"the", {AArch64::FeatureTHE}},
3642 {"d128", {AArch64::FeatureD128}},
3643 {"lse128", {AArch64::FeatureLSE128}},
3644 {"ite", {AArch64::FeatureITE}},
3645 {"cssc", {AArch64::FeatureCSSC}},
3646 {"rcpc3", {AArch64::FeatureRCPC3}},
3647 {"gcs", {AArch64::FeatureGCS}},
3648 {"bf16", {AArch64::FeatureBF16}},
3649 {"compnum", {AArch64::FeatureComplxNum}},
3650 {"dotprod", {AArch64::FeatureDotProd}},
3651 {"f32mm", {AArch64::FeatureMatMulFP32}},
3652 {"f64mm", {AArch64::FeatureMatMulFP64}},
3653 {"fp16", {AArch64::FeatureFullFP16}},
3654 {"fp16fml", {AArch64::FeatureFP16FML}},
3655 {"i8mm", {AArch64::FeatureMatMulInt8}},
3656 {"lor", {AArch64::FeatureLOR}},
3657 {"profile", {AArch64::FeatureSPE}},
3658 // "rdma" is the name documented by binutils for the feature, but
3659 // binutils also accepts incomplete prefixes of features, so "rdm"
3660 // works too. Support both spellings here.
3661 {"rdm", {AArch64::FeatureRDM}},
3662 {"rdma", {AArch64::FeatureRDM}},
3663 {"sb", {AArch64::FeatureSB}},
3664 {"ssbs", {AArch64::FeatureSSBS}},
3665 {"tme", {AArch64::FeatureTME}},
3666 {"fpmr", {AArch64::FeatureFPMR}},
3667 {"fp8", {AArch64::FeatureFP8}},
3668 {"faminmax", {AArch64::FeatureFAMINMAX}},
3669 {"fp8fma", {AArch64::FeatureFP8FMA}},
3670 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3671 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3672 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3673 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3674 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3675 {"lut", {AArch64::FeatureLUT}},
3676 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3677 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3678 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3679 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3680 {"cpa", {AArch64::FeatureCPA}},
3682
3683static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3684 if (FBS[AArch64::HasV8_0aOps])
3685 Str += "ARMv8a";
3686 if (FBS[AArch64::HasV8_1aOps])
3687 Str += "ARMv8.1a";
3688 else if (FBS[AArch64::HasV8_2aOps])
3689 Str += "ARMv8.2a";
3690 else if (FBS[AArch64::HasV8_3aOps])
3691 Str += "ARMv8.3a";
3692 else if (FBS[AArch64::HasV8_4aOps])
3693 Str += "ARMv8.4a";
3694 else if (FBS[AArch64::HasV8_5aOps])
3695 Str += "ARMv8.5a";
3696 else if (FBS[AArch64::HasV8_6aOps])
3697 Str += "ARMv8.6a";
3698 else if (FBS[AArch64::HasV8_7aOps])
3699 Str += "ARMv8.7a";
3700 else if (FBS[AArch64::HasV8_8aOps])
3701 Str += "ARMv8.8a";
3702 else if (FBS[AArch64::HasV8_9aOps])
3703 Str += "ARMv8.9a";
3704 else if (FBS[AArch64::HasV9_0aOps])
3705 Str += "ARMv9-a";
3706 else if (FBS[AArch64::HasV9_1aOps])
3707 Str += "ARMv9.1a";
3708 else if (FBS[AArch64::HasV9_2aOps])
3709 Str += "ARMv9.2a";
3710 else if (FBS[AArch64::HasV9_3aOps])
3711 Str += "ARMv9.3a";
3712 else if (FBS[AArch64::HasV9_4aOps])
3713 Str += "ARMv9.4a";
3714 else if (FBS[AArch64::HasV9_5aOps])
3715 Str += "ARMv9.5a";
3716 else if (FBS[AArch64::HasV8_0rOps])
3717 Str += "ARMv8r";
3718 else {
3719 SmallVector<std::string, 2> ExtMatches;
3720 for (const auto& Ext : ExtensionMap) {
3721 // Use & in case multiple features are enabled
3722 if ((FBS & Ext.Features) != FeatureBitset())
3723 ExtMatches.push_back(Ext.Name);
3724 }
3725 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3726 }
3727}
3728
3729void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3730 SMLoc S) {
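// The 14-bit alias encoding packs op1:CRn:CRm:op2; the masks below unpack
// op2 from bits [2:0], CRm from [6:3], CRn from [10:7] and op1 from
// [13:11] to rebuild the SYS operand list.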
3731 const uint16_t Op2 = Encoding & 7;
3732 const uint16_t Cm = (Encoding & 0x78) >> 3;
3733 const uint16_t Cn = (Encoding & 0x780) >> 7;
3734 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3735
3736 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3737
3738 Operands.push_back(
3739 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3740 Operands.push_back(
3741 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3742 Operands.push_back(
3743 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3744 Expr = MCConstantExpr::create(Op2, getContext());
3745 Operands.push_back(
3746 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3747}
3748
3749/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3750/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3751 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3752 OperandVector &Operands) {
3753 if (Name.contains('.'))
3754 return TokError("invalid operand");
3755
3756 Mnemonic = Name;
3757 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3758
3759 const AsmToken &Tok = getTok();
3760 StringRef Op = Tok.getString();
3761 SMLoc S = Tok.getLoc();
3762
3763 if (Mnemonic == "ic") {
3764 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3765 if (!IC)
3766 return TokError("invalid operand for IC instruction");
3767 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3768 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3770 return TokError(Str);
3771 }
3772 createSysAlias(IC->Encoding, Operands, S);
3773 } else if (Mnemonic == "dc") {
3774 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3775 if (!DC)
3776 return TokError("invalid operand for DC instruction");
3777 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3778 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3780 return TokError(Str);
3781 }
3782 createSysAlias(DC->Encoding, Operands, S);
3783 } else if (Mnemonic == "at") {
3784 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3785 if (!AT)
3786 return TokError("invalid operand for AT instruction");
3787 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3788 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3790 return TokError(Str);
3791 }
3792 createSysAlias(AT->Encoding, Operands, S);
3793 } else if (Mnemonic == "tlbi") {
3794 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3795 if (!TLBI)
3796 return TokError("invalid operand for TLBI instruction");
3797 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3798 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3800 return TokError(Str);
3801 }
3802 createSysAlias(TLBI->Encoding, Operands, S);
3803 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3804
3805 if (Op.lower() != "rctx")
3806 return TokError("invalid operand for prediction restriction instruction");
3807
3808 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3809 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3810 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3811
3812 if (Mnemonic == "cosp" && !hasSpecres2)
3813 return TokError("COSP requires: predres2");
3814 if (!hasPredres)
3815 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3816
3817 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3818 : Mnemonic == "dvp" ? 0b101
3819 : Mnemonic == "cosp" ? 0b110
3820 : Mnemonic == "cpp" ? 0b111
3821 : 0;
3822 assert(PRCTX_Op2 &&
3823 "Invalid mnemonic for prediction restriction instruction");
3824 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3825 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
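// E.g. "cfp rctx, x0" (op2=0b100) is thereby emitted as the equivalent
// "sys #3, c7, c3, #4, x0".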
3826
3827 createSysAlias(Encoding, Operands, S);
3828 }
3829
3830 Lex(); // Eat operand.
3831
3832 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3833 bool HasRegister = false;
3834
3835 // Check for the optional register operand.
3836 if (parseOptionalToken(AsmToken::Comma)) {
3837 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3838 return TokError("expected register operand");
3839 HasRegister = true;
3840 }
3841
3842 if (ExpectRegister && !HasRegister)
3843 return TokError("specified " + Mnemonic + " op requires a register");
3844 else if (!ExpectRegister && HasRegister)
3845 return TokError("specified " + Mnemonic + " op does not use a register");
3846
3847 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3848 return true;
3849
3850 return false;
3851}
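// E.g. "tlbi vae1, x0" must name the address register, while an "all"
// form such as "tlbi vmalle1" must not; that is what the ExpectRegister
// checks above enforce.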
3852
3853/// parseSyspAlias - The TLBIP instructions are simple aliases for
3854/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3855 bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3856 OperandVector &Operands) {
3857 if (Name.contains('.'))
3858 return TokError("invalid operand");
3859
3860 Mnemonic = Name;
3861 Operands.push_back(
3862 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3863
3864 const AsmToken &Tok = getTok();
3865 StringRef Op = Tok.getString();
3866 SMLoc S = Tok.getLoc();
3867
3868 if (Mnemonic == "tlbip") {
3869 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3870 if (HasnXSQualifier) {
3871 Op = Op.drop_back(3);
3872 }
3873 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3874 if (!TLBIorig)
3875 return TokError("invalid operand for TLBIP instruction");
3876 const AArch64TLBI::TLBI TLBI(
3877 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3878 TLBIorig->NeedsReg,
3879 HasnXSQualifier
3880 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3881 : TLBIorig->FeaturesRequired);
3882 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3883 std::string Name =
3884 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3885 std::string Str("TLBIP " + Name + " requires: ");
3887 return TokError(Str);
3888 }
3889 createSysAlias(TLBI.Encoding, Operands, S);
3890 }
3891
3892 Lex(); // Eat operand.
3893
3894 if (parseComma())
3895 return true;
3896
3897 if (Tok.isNot(AsmToken::Identifier))
3898 return TokError("expected register identifier");
3899 auto Result = tryParseSyspXzrPair(Operands);
3900 if (Result.isNoMatch())
3901 Result = tryParseGPRSeqPair(Operands);
3902 if (!Result.isSuccess())
3903 return TokError("specified " + Mnemonic +
3904 " op requires a pair of registers");
3905
3906 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3907 return true;
3908
3909 return false;
3910}
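// E.g. "tlbip vae1, x0, x1" supplies the required sequential register
// pair, and an "nXS"-suffixed operand name additionally sets bit 7 of the
// encoding as handled above.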
3911
3912ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3913 MCAsmParser &Parser = getParser();
3914 const AsmToken &Tok = getTok();
3915
3916 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
3917 return TokError("'csync' operand expected");
3918 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3919 // Immediate operand.
3920 const MCExpr *ImmVal;
3921 SMLoc ExprLoc = getLoc();
3922 AsmToken IntTok = Tok;
3923 if (getParser().parseExpression(ImmVal))
3924 return ParseStatus::Failure;
3925 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3926 if (!MCE)
3927 return Error(ExprLoc, "immediate value expected for barrier operand");
3928 int64_t Value = MCE->getValue();
3929 if (Mnemonic == "dsb" && Value > 15) {
3930 // This case is a no match here, but it might be matched by the nXS
3931 // variant. We deliberately don't unlex the optional '#', as it is not
3932 // needed to recognize an integer immediate.
3933 Parser.getLexer().UnLex(IntTok);
3934 return ParseStatus::NoMatch;
3935 }
3936 if (Value < 0 || Value > 15)
3937 return Error(ExprLoc, "barrier operand out of range");
3938 auto DB = AArch64DB::lookupDBByEncoding(Value);
3939 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3940 ExprLoc, getContext(),
3941 false /*hasnXSModifier*/));
3942 return ParseStatus::Success;
3943 }
3944
3945 if (Tok.isNot(AsmToken::Identifier))
3946 return TokError("invalid operand for instruction");
3947
3948 StringRef Operand = Tok.getString();
3949 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3950 auto DB = AArch64DB::lookupDBByName(Operand);
3951 // The only valid named option for ISB is 'sy'
3952 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
3953 return TokError("'sy' or #imm operand expected");
3954 // The only valid named option for TSB is 'csync'
3955 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
3956 return TokError("'csync' operand expected");
3957 if (!DB && !TSB) {
3958 if (Mnemonic == "dsb") {
3959 // This case is a no match here, but it might be matched by the nXS
3960 // variant.
3961 return ParseStatus::NoMatch;
3962 }
3963 return TokError("invalid barrier option name");
3964 }
3965
3966 Operands.push_back(AArch64Operand::CreateBarrier(
3967 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3968 getContext(), false /*hasnXSModifier*/));
3969 Lex(); // Consume the option
3970
3971 return ParseStatus::Success;
3972}
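// E.g. "dsb sy", "dsb #15" and "isb sy" all resolve here, while "dsb #20"
// is returned as NoMatch so the nXS variant parser below can claim it.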
3973
3974 ParseStatus
3975 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3976 const AsmToken &Tok = getTok();
3977
3978 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3979 if (Mnemonic != "dsb")
3980 return ParseStatus::Failure;
3981
3982 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3983 // Immediate operand.
3984 const MCExpr *ImmVal;
3985 SMLoc ExprLoc = getLoc();
3986 if (getParser().parseExpression(ImmVal))
3987 return ParseStatus::Failure;
3988 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3989 if (!MCE)
3990 return Error(ExprLoc, "immediate value expected for barrier operand");
3991 int64_t Value = MCE->getValue();
3992 // v8.7-A DSB in the nXS variant accepts only the following immediate
3993 // values: 16, 20, 24, 28.
3994 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
3995 return Error(ExprLoc, "barrier operand out of range");
3996 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3997 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3998 ExprLoc, getContext(),
3999 true /*hasnXSModifier*/));
4000 return ParseStatus::Success;
4001 }
4002
4003 if (Tok.isNot(AsmToken::Identifier))
4004 return TokError("invalid operand for instruction");
4005
4006 StringRef Operand = Tok.getString();
4007 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4008
4009 if (!DB)
4010 return TokError("invalid barrier option name");
4011
4012 Operands.push_back(
4013 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4014 getContext(), true /*hasnXSModifier*/));
4015 Lex(); // Consume the option
4016
4017 return ParseStatus::Success;
4018}
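// The four accepted immediates map to the named nXS barriers, e.g.
// "dsb #28" is equivalent to "dsb synxs" and "dsb #16" to "dsb oshnxs".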
4019
4020ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4021 const AsmToken &Tok = getTok();
4022
4023 if (Tok.isNot(AsmToken::Identifier))
4024 return ParseStatus::NoMatch;
4025
4026 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4027 return ParseStatus::NoMatch;
4028
4029 int MRSReg, MSRReg;
4030 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4031 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4032 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4033 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4034 } else
4035 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4036
4037 unsigned PStateImm = -1;
4038 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4039 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4040 PStateImm = PState15->Encoding;
4041 if (!PState15) {
4042 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4043 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4044 PStateImm = PState1->Encoding;
4045 }
4046
4047 Operands.push_back(
4048 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4049 PStateImm, getContext()));
4050 Lex(); // Eat identifier
4051
4052 return ParseStatus::Success;
4053}
4054
4055/// tryParseNeonVectorRegister - Parse a vector register operand.
4056bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4057 if (getTok().isNot(AsmToken::Identifier))
4058 return true;
4059
4060 SMLoc S = getLoc();
4061 // Check for a vector register specifier first.
4062 StringRef Kind;
4063 MCRegister Reg;
4064 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4065 if (!Res.isSuccess())
4066 return true;
4067
4068 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4069 if (!KindRes)
4070 return true;
4071
4072 unsigned ElementWidth = KindRes->second;
4073 Operands.push_back(
4074 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4075 S, getLoc(), getContext()));
4076
4077 // If there was an explicit qualifier, that goes on as a literal text
4078 // operand.
4079 if (!Kind.empty())
4080 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4081
4082 return tryParseVectorIndex(Operands).isFailure();
4083}
4084
4085ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4086 SMLoc SIdx = getLoc();
4087 if (parseOptionalToken(AsmToken::LBrac)) {
4088 const MCExpr *ImmVal;
4089 if (getParser().parseExpression(ImmVal))
4090 return ParseStatus::NoMatch;
4091 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4092 if (!MCE)
4093 return TokError("immediate value expected for vector index");
4094
4095 SMLoc E = getLoc();
4096
4097 if (parseToken(AsmToken::RBrac, "']' expected"))
4098 return ParseStatus::Failure;
4099
4100 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4101 E, getContext()));
4102 return ParseStatus::Success;
4103 }
4104
4105 return ParseStatus::NoMatch;
4106}
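// E.g. the trailing "[1]" of "mov s0, v1.s[1]" is consumed here and
// becomes a vector-index operand holding the constant 1.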
4107
4108// tryParseVectorRegister - Try to parse a vector register name with
4109// optional kind specifier. If it is a register specifier, eat the token
4110// and return it.
4111ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4112 StringRef &Kind,
4113 RegKind MatchKind) {
4114 const AsmToken &Tok = getTok();
4115
4116 if (Tok.isNot(AsmToken::Identifier))
4117 return ParseStatus::NoMatch;
4118
4119 StringRef Name = Tok.getString();
4120 // If there is a kind specifier, it's separated from the register name by
4121 // a '.'.
4122 size_t Start = 0, Next = Name.find('.');
4123 StringRef Head = Name.slice(Start, Next);
4124 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4125
4126 if (RegNum) {
4127 if (Next != StringRef::npos) {
4128 Kind = Name.slice(Next, StringRef::npos);
4129 if (!isValidVectorKind(Kind, MatchKind))
4130 return TokError("invalid vector kind qualifier");
4131 }
4132 Lex(); // Eat the register token.
4133
4134 Reg = RegNum;
4135 return ParseStatus::Success;
4136 }
4137
4138 return ParseStatus::NoMatch;
4139}
4140
4141 /// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
4142 template <RegKind RK>
4143 ParseStatus
4144 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4145 // Check for an SVE predicate register specifier first.
4146 const SMLoc S = getLoc();
4147 StringRef Kind;
4148 MCRegister RegNum;
4149 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4150 if (!Res.isSuccess())
4151 return Res;
4152
4153 const auto &KindRes = parseVectorKind(Kind, RK);
4154 if (!KindRes)
4155 return ParseStatus::NoMatch;
4156
4157 unsigned ElementWidth = KindRes->second;
4158 Operands.push_back(AArch64Operand::CreateVectorReg(
4159 RegNum, RK, ElementWidth, S,
4160 getLoc(), getContext()));
4161
4162 if (getLexer().is(AsmToken::LBrac)) {
4163 if (RK == RegKind::SVEPredicateAsCounter) {
4164 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4165 if (ResIndex.isSuccess())
4166 return ParseStatus::Success;
4167 } else {
4168 // Indexed predicate, there's no comma so try to parse the next operand
4169 // immediately.
4170 if (parseOperand(Operands, false, false))
4171 return ParseStatus::NoMatch;
4172 }
4173 }
4174
4175 // Not all predicates are followed by a '/m' or '/z'.
4176 if (getTok().isNot(AsmToken::Slash))
4177 return ParseStatus::Success;
4178
4179 // But when they are, they shouldn't have an element type suffix.
4180 if (!Kind.empty())
4181 return Error(S, "not expecting size suffix");
4182
4183 // Add a literal slash as operand
4184 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4185
4186 Lex(); // Eat the slash.
4187
4188 // Zeroing or merging?
4189 auto Pred = getTok().getString().lower();
4190 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4191 return Error(getLoc(), "expecting 'z' predication");
4192
4193 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4194 return Error(getLoc(), "expecting 'm' or 'z' predication");
4195
4196 // Add zero/merge token.
4197 const char *ZM = Pred == "z" ? "z" : "m";
4198 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4199
4200 Lex(); // Eat zero/merge token.
4201 return ParseStatus::Success;
4202}
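// E.g. "p0.s" parses as a predicate with an element size suffix, whereas a
// governing predicate such as "p0/z" or "p3/m" must be written without a
// suffix, as checked above.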
4203
4204/// parseRegister - Parse a register operand.
4205bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4206 // Try for a Neon vector register.
4207 if (!tryParseNeonVectorRegister(Operands))
4208 return false;
4209
4210 if (tryParseZTOperand(Operands).isSuccess())
4211 return false;
4212
4213 // Otherwise try for a scalar register.
4214 if (tryParseGPROperand<false>(Operands).isSuccess())
4215 return false;
4216
4217 return true;
4218}
4219
4220bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4221 bool HasELFModifier = false;
4222 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4223
4224 if (parseOptionalToken(AsmToken::Colon)) {
4225 HasELFModifier = true;
4226
4227 if (getTok().isNot(AsmToken::Identifier))
4228 return TokError("expect relocation specifier in operand after ':'");
4229
4230 std::string LowerCase = getTok().getIdentifier().lower();
4231 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4232 .Case("lo12", AArch64MCExpr::VK_LO12)
4233 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4234 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4235 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4236 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4237 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4238 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4239 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4240 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4241 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4242 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4243 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4244 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4245 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4246 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4247 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4248 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4249 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4250 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4251 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4252 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4253 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4254 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4255 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4256 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4257 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4258 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4259 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4260 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4261 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4262 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4263 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4264 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4265 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4266 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4267 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4269 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4270 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4272 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4273 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4274 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4276 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4277 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4279
4280 if (RefKind == AArch64MCExpr::VK_INVALID)
4281 return TokError("expect relocation specifier in operand after ':'");
4282
4283 Lex(); // Eat identifier
4284
4285 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4286 return true;
4287 }
4288
4289 if (getParser().parseExpression(ImmVal))
4290 return true;
4291
4292 if (HasELFModifier)
4293 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4294
4295 return false;
4296}
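// E.g. in "add x0, x0, :lo12:sym" the ":lo12:" specifier selects VK_LO12,
// and the following expression is wrapped in an AArch64MCExpr carrying
// that variant kind.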
4297
4298ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4299 if (getTok().isNot(AsmToken::LCurly))
4300 return ParseStatus::NoMatch;
4301
4302 auto ParseMatrixTile = [this](unsigned &Reg,
4303 unsigned &ElementWidth) -> ParseStatus {
4304 StringRef Name = getTok().getString();
4305 size_t DotPosition = Name.find('.');
4306 if (DotPosition == StringRef::npos)
4307 return ParseStatus::NoMatch;
4308
4309 unsigned RegNum = matchMatrixTileListRegName(Name);
4310 if (!RegNum)
4311 return ParseStatus::NoMatch;
4312
4313 StringRef Tail = Name.drop_front(DotPosition);
4314 const std::optional<std::pair<int, int>> &KindRes =
4315 parseVectorKind(Tail, RegKind::Matrix);
4316 if (!KindRes)
4317 return TokError(
4318 "Expected the register to be followed by element width suffix");
4319 ElementWidth = KindRes->second;
4320 Reg = RegNum;
4321 Lex(); // Eat the register.
4322 return ParseStatus::Success;
4323 };
4324
4325 SMLoc S = getLoc();
4326 auto LCurly = getTok();
4327 Lex(); // Eat left bracket token.
4328
4329 // Empty matrix list
4330 if (parseOptionalToken(AsmToken::RCurly)) {
4331 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4332 /*RegMask=*/0, S, getLoc(), getContext()));
4333 return ParseStatus::Success;
4334 }
4335
4336 // Try to parse the {za} alias early.
4337 if (getTok().getString().equals_insensitive("za")) {
4338 Lex(); // Eat 'za'
4339
4340 if (parseToken(AsmToken::RCurly, "'}' expected"))
4341 return ParseStatus::Failure;
4342
4343 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4344 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4345 return ParseStatus::Success;
4346 }
4347
4348 SMLoc TileLoc = getLoc();
4349
4350 unsigned FirstReg, ElementWidth;
4351 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4352 if (!ParseRes.isSuccess()) {
4353 getLexer().UnLex(LCurly);
4354 return ParseRes;
4355 }
4356
4357 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4358
4359 unsigned PrevReg = FirstReg;
4360
4362 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4363
4364 SmallSet<unsigned, 8> SeenRegs;
4365 SeenRegs.insert(FirstReg);
4366
4367 while (parseOptionalToken(AsmToken::Comma)) {
4368 TileLoc = getLoc();
4369 unsigned Reg, NextElementWidth;
4370 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4371 if (!ParseRes.isSuccess())
4372 return ParseRes;
4373
4374 // Element size must match on all regs in the list.
4375 if (ElementWidth != NextElementWidth)
4376 return Error(TileLoc, "mismatched register size suffix");
4377
4378 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4379 Warning(TileLoc, "tile list not in ascending order");
4380
4381 if (SeenRegs.contains(Reg))
4382 Warning(TileLoc, "duplicate tile in list");
4383 else {
4384 SeenRegs.insert(Reg);
4385 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4386 }
4387
4388 PrevReg = Reg;
4389 }
4390
4391 if (parseToken(AsmToken::RCurly, "'}' expected"))
4392 return ParseStatus::Failure;
4393
4394 unsigned RegMask = 0;
4395 for (auto Reg : DRegs)
4396 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4397 RI->getEncodingValue(AArch64::ZAD0));
4398 Operands.push_back(
4399 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4400
4401 return ParseStatus::Success;
4402}
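// E.g. "{za0.d, za1.d}" produces a mask with two ZAD bits set, "{za}"
// selects all eight tiles via the 0xFF alias above, and out-of-order or
// duplicate tiles only draw warnings.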
4403
4404template <RegKind VectorKind>
4405ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4406 bool ExpectMatch) {
4407 MCAsmParser &Parser = getParser();
4408 if (!getTok().is(AsmToken::LCurly))
4409 return ParseStatus::NoMatch;
4410
4411 // Wrapper around parse function
4412 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4413 bool NoMatchIsError) -> ParseStatus {
4414 auto RegTok = getTok();
4415 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4416 if (ParseRes.isSuccess()) {
4417 if (parseVectorKind(Kind, VectorKind))
4418 return ParseRes;
4419 llvm_unreachable("Expected a valid vector kind");
4420 }
4421
4422 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4423 RegTok.getString().equals_insensitive("zt0"))
4424 return ParseStatus::NoMatch;
4425
4426 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4427 (ParseRes.isNoMatch() && NoMatchIsError &&
4428 !RegTok.getString().starts_with_insensitive("za")))
4429 return Error(Loc, "vector register expected");
4430
4431 return ParseStatus::NoMatch;
4432 };
4433
4434 int NumRegs = getNumRegsForRegKind(VectorKind);
4435 SMLoc S = getLoc();
4436 auto LCurly = getTok();
4437 Lex(); // Eat left bracket token.
4438
4440 MCRegister FirstReg;
4441 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4442
4443 // Put back the original left bracket if there was no match, so that
4444 // different types of list-operands can be matched (e.g. SVE, Neon).
4445 if (ParseRes.isNoMatch())
4446 Parser.getLexer().UnLex(LCurly);
4447
4448 if (!ParseRes.isSuccess())
4449 return ParseRes;
4450
4451 int64_t PrevReg = FirstReg;
4452 unsigned Count = 1;
4453
4454 int Stride = 1;
4455 if (parseOptionalToken(AsmToken::Minus)) {
4456 SMLoc Loc = getLoc();
4457 StringRef NextKind;
4458
4460 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4461 if (!ParseRes.isSuccess())
4462 return ParseRes;
4463
4464 // Any kind suffixes must match on all regs in the list.
4465 if (Kind != NextKind)
4466 return Error(Loc, "mismatched register size suffix");
4467
4468 unsigned Space =
4469 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4470
4471 if (Space == 0 || Space > 3)
4472 return Error(Loc, "invalid number of vectors");
4473
4474 Count += Space;
4475 }
4476 else {
4477 bool HasCalculatedStride = false;
4478 while (parseOptionalToken(AsmToken::Comma)) {
4479 SMLoc Loc = getLoc();
4480 StringRef NextKind;
4481 MCRegister Reg;
4482 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4483 if (!ParseRes.isSuccess())
4484 return ParseRes;
4485
4486 // Any kind suffixes must match on all regs in the list.
4487 if (Kind != NextKind)
4488 return Error(Loc, "mismatched register size suffix");
4489
4490 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4491 unsigned PrevRegVal =
4492 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4493 if (!HasCalculatedStride) {
4494 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4495 : (RegVal + NumRegs - PrevRegVal);
4496 HasCalculatedStride = true;
4497 }
4498
4499 // Register must be incremental (with a wraparound at the last register).
4500 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4501 return Error(Loc, "registers must have the same sequential stride");
4502
4503 PrevReg = Reg;
4504 ++Count;
4505 }
4506 }
4507
4508 if (parseToken(AsmToken::RCurly, "'}' expected"))
4509 return ParseStatus::Failure;
4510
4511 if (Count > 4)
4512 return Error(S, "invalid number of vectors");
4513
4514 unsigned NumElements = 0;
4515 unsigned ElementWidth = 0;
4516 if (!Kind.empty()) {
4517 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4518 std::tie(NumElements, ElementWidth) = *VK;
4519 }
4520
4521 Operands.push_back(AArch64Operand::CreateVectorList(
4522 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4523 getLoc(), getContext()));
4524
4525 return ParseStatus::Success;
4526}
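// E.g. "{ v0.4s, v1.4s }" is a stride-1 list of two registers; SME2
// strided forms such as "{ z0.s, z8.s }" also pass, because the stride is
// derived from the first pair and then enforced modulo the register count.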
4527
4528/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4529bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4530 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4531 if (!ParseRes.isSuccess())
4532 return true;
4533
4534 return tryParseVectorIndex(Operands).isFailure();
4535}
4536
4537ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4538 SMLoc StartLoc = getLoc();
4539
4540 MCRegister RegNum;
4541 ParseStatus Res = tryParseScalarRegister(RegNum);
4542 if (!Res.isSuccess())
4543 return Res;
4544
4545 if (!parseOptionalToken(AsmToken::Comma)) {
4546 Operands.push_back(AArch64Operand::CreateReg(
4547 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4548 return ParseStatus::Success;
4549 }
4550
4551 parseOptionalToken(AsmToken::Hash);
4552
4553 if (getTok().isNot(AsmToken::Integer))
4554 return Error(getLoc(), "index must be absent or #0");
4555
4556 const MCExpr *ImmVal;
4557 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4558 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4559 return Error(getLoc(), "index must be absent or #0");
4560
4561 Operands.push_back(AArch64Operand::CreateReg(
4562 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4563 return ParseStatus::Success;
4564}
4565
4566ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4567 SMLoc StartLoc = getLoc();
4568 const AsmToken &Tok = getTok();
4569 std::string Name = Tok.getString().lower();
4570
4571 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4572
4573 if (RegNum == 0)
4574 return ParseStatus::NoMatch;
4575
4576 Operands.push_back(AArch64Operand::CreateReg(
4577 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4578 Lex(); // Eat register.
4579
4580 // Check if the register is followed by an index.
4581 if (parseOptionalToken(AsmToken::LBrac)) {
4582 Operands.push_back(
4583 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4584 const MCExpr *ImmVal;
4585 if (getParser().parseExpression(ImmVal))
4586 return ParseStatus::NoMatch;
4587 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4588 if (!MCE)
4589 return TokError("immediate value expected for vector index");
4590 Operands.push_back(AArch64Operand::CreateImm(
4591 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4592 getLoc(), getContext()));
4593 if (parseOptionalToken(AsmToken::Comma))
4594 if (parseOptionalMulOperand(Operands))
4595 return ParseStatus::Failure;
4596 if (parseToken(AsmToken::RBrac, "']' expected"))
4597 return ParseStatus::Failure;
4598 Operands.push_back(
4599 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4600 }
4601 return ParseStatus::Success;
4602}
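// E.g. "zt0" alone names the lookup-table register, and an indexed form
// like "zt0[0]" (optionally with a trailing ", mul vl") is broken into
// '[', immediate and ']' operands here.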
4603
4604template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4605ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4606 SMLoc StartLoc = getLoc();
4607
4608 MCRegister RegNum;
4609 ParseStatus Res = tryParseScalarRegister(RegNum);
4610 if (!Res.isSuccess())
4611 return Res;
4612
4613 // No shift/extend is the default.
4614 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4615 Operands.push_back(AArch64Operand::CreateReg(
4616 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4617 return ParseStatus::Success;
4618 }
4619
4620 // Eat the comma
4621 Lex();
4622
4623 // Match the shift
4624 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4625 Res = tryParseOptionalShiftExtend(ExtOpnd);
4626 if (!Res.isSuccess())
4627 return Res;
4628
4629 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4630 Operands.push_back(AArch64Operand::CreateReg(
4631 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4632 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4633 Ext->hasShiftExtendAmount()));
4634
4635 return ParseStatus::Success;
4636}
4637
4638bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4639 MCAsmParser &Parser = getParser();
4640
4641 // Some SVE instructions have a decoration after the immediate, i.e.
4642 // "mul vl". We parse them here and add tokens, which must be present in the
4643 // asm string in the tablegen instruction.
4644 bool NextIsVL =
4645 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4646 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4647 if (!getTok().getString().equals_insensitive("mul") ||
4648 !(NextIsVL || NextIsHash))
4649 return true;
4650
4651 Operands.push_back(
4652 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4653 Lex(); // Eat the "mul"
4654
4655 if (NextIsVL) {
4656 Operands.push_back(
4657 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4658 Lex(); // Eat the "vl"
4659 return false;
4660 }
4661
4662 if (NextIsHash) {
4663 Lex(); // Eat the #
4664 SMLoc S = getLoc();
4665
4666 // Parse immediate operand.
4667 const MCExpr *ImmVal;
4668 if (!Parser.parseExpression(ImmVal))
4669 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4670 Operands.push_back(AArch64Operand::CreateImm(
4671 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4672 getContext()));
4673 return false;
4674 }
4675 }
4676
4677 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4678}
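// E.g. the "mul vl" in "ldr z0, [x0, #1, mul vl]" and the "mul #4" in
// "incw x0, all, mul #4" are consumed here as the trailing tokens and
// immediate.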
4679
4680bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4681 StringRef &VecGroup) {
4682 MCAsmParser &Parser = getParser();
4683 auto Tok = Parser.getTok();
4684 if (Tok.isNot(AsmToken::Identifier))
4685 return true;
4686
4688 .Case("vgx2", "vgx2")
4689 .Case("vgx4", "vgx4")
4690 .Default("");
4691
4692 if (VG.empty())
4693 return true;
4694
4695 VecGroup = VG;
4696 Parser.Lex(); // Eat vgx[2|4]
4697 return false;
4698}
4699
4700bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4701 auto Tok = getTok();
4702 if (Tok.isNot(AsmToken::Identifier))
4703 return true;
4704
4705 auto Keyword = Tok.getString();
4706 Keyword = StringSwitch<StringRef>(Keyword)
4707 .Case("sm", "sm")
4708 .Case("za", "za")
4709 .Default(Keyword);
4710 Operands.push_back(
4711 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4712
4713 Lex();
4714 return false;
4715}
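// E.g. the "sm" in "smstart sm" and the "za" in "smstop za" are pushed
// here as plain keyword tokens rather than registers.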
4716
4717 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4718/// operand regardless of the mnemonic.
4719bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4720 bool invertCondCode) {
4721 MCAsmParser &Parser = getParser();
4722
4723 ParseStatus ResTy =
4724 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4725
4726 // Check if the current operand has a custom associated parser, if so, try to
4727 // custom parse the operand, or fallback to the general approach.
4728 if (ResTy.isSuccess())
4729 return false;
4730 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4731 // there was a match, but an error occurred, in which case, just return that
4732 // the operand parsing failed.
4733 if (ResTy.isFailure())
4734 return true;
4735
4736 // Nothing custom, so do general case parsing.
4737 SMLoc S, E;
4738 switch (getLexer().getKind()) {
4739 default: {
4740 SMLoc S = getLoc();
4741 const MCExpr *Expr;
4742 if (parseSymbolicImmVal(Expr))
4743 return Error(S, "invalid operand");
4744
4745 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4746 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4747 return false;
4748 }
4749 case AsmToken::LBrac: {
4750 Operands.push_back(
4751 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4752 Lex(); // Eat '['
4753
4754 // There's no comma after a '[', so we can parse the next operand
4755 // immediately.
4756 return parseOperand(Operands, false, false);
4757 }
4758 case AsmToken::LCurly: {
4759 if (!parseNeonVectorList(Operands))
4760 return false;
4761
4762 Operands.push_back(
4763 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4764 Lex(); // Eat '{'
4765
4766 // There's no comma after a '{', so we can parse the next operand
4767 // immediately.
4768 return parseOperand(Operands, false, false);
4769 }
4770 case AsmToken::Identifier: {
4771 // See if this is a "VG" decoration used by SME instructions.
4772 StringRef VecGroup;
4773 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4774 Operands.push_back(
4775 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4776 return false;
4777 }
4778 // If we're expecting a Condition Code operand, then just parse that.
4779 if (isCondCode)
4780 return parseCondCode(Operands, invertCondCode);
4781
4782 // If it's a register name, parse it.
4783 if (!parseRegister(Operands))
4784 return false;
4785
4786 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4787 // by SVE instructions.
4788 if (!parseOptionalMulOperand(Operands))
4789 return false;
4790
4791 // This could be an optional "shift" or "extend" operand.
4792 ParseStatus GotShift = tryParseOptionalShiftExtend(Operands);
4793 // We can only continue if no tokens were eaten.
4794 if (!GotShift.isNoMatch())
4795 return GotShift.isFailure();
4796
4797 // If this is a two-word mnemonic, parse its special keyword
4798 // operand as an identifier.
4799 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4800 Mnemonic == "gcsb")
4801 return parseKeywordOperand(Operands);
4802
4803 // This was not a register so parse other operands that start with an
4804 // identifier (like labels) as expressions and create them as immediates.
4805 const MCExpr *IdVal;
4806 S = getLoc();
4807 if (getParser().parseExpression(IdVal))
4808 return true;
4809 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4810 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4811 return false;
4812 }
4813 case AsmToken::Integer:
4814 case AsmToken::Real:
4815 case AsmToken::Hash: {
4816 // #42 -> immediate.
4817 S = getLoc();
4818
4819 parseOptionalToken(AsmToken::Hash);
4820
4821 // Parse a negative sign
4822 bool isNegative = false;
4823 if (getTok().is(AsmToken::Minus)) {
4824 isNegative = true;
4825 // We need to consume this token only when we have a Real, otherwise
4826 // we let parseSymbolicImmVal take care of it.
4827 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4828 Lex();
4829 }
4830
4831 // The only Real that should come through here is a literal #0.0 for
4832 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4833 // so convert the value.
4834 const AsmToken &Tok = getTok();
4835 if (Tok.is(AsmToken::Real)) {
4836 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4837 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4838 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4839 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4840 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4841 return TokError("unexpected floating point literal");
4842 else if (IntVal != 0 || isNegative)
4843 return TokError("expected floating-point constant #0.0");
4844 Lex(); // Eat the token.
4845
4846 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4847 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4848 return false;
4849 }
4850
4851 const MCExpr *ImmVal;
4852 if (parseSymbolicImmVal(ImmVal))
4853 return true;
4854
4855 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4856 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4857 return false;
4858 }
4859 case AsmToken::Equal: {
4860 SMLoc Loc = getLoc();
4861 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4862 return TokError("unexpected token in operand");
4863 Lex(); // Eat '='
4864 const MCExpr *SubExprVal;
4865 if (getParser().parseExpression(SubExprVal))
4866 return true;
4867
4868 if (Operands.size() < 2 ||
4869 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4870 return Error(Loc, "Only valid when first operand is register");
4871
4872 bool IsXReg =
4873 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4874 Operands[1]->getReg());
4875
4876 MCContext& Ctx = getContext();
4877 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4878 // If the op is an imm and can fit into a mov, then replace ldr with mov.
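// E.g. "ldr x0, =0x20000" becomes "movz x0, #2, lsl #16": the constant is
// shifted down to a 16-bit chunk and the shift is re-applied as an LSL.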
4879 if (isa<MCConstantExpr>(SubExprVal)) {
4880 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4881 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4882 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
4883 ShiftAmt += 16;
4884 Imm >>= 16;
4885 }
4886 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4887 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4888 Operands.push_back(AArch64Operand::CreateImm(
4889 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4890 if (ShiftAmt)
4891 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4892 ShiftAmt, true, S, E, Ctx));
4893 return false;
4894 }
4895 APInt Simm = APInt(64, Imm << ShiftAmt);
4896 // Check if the immediate is an unsigned or signed 32-bit int for W regs.
4897 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4898 return Error(Loc, "Immediate too large for register");