LLVM 19.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
/// RegKind - The categories of AArch64 registers the assembly parser
/// distinguishes while matching register operands. (The original listing had
/// doxygen line numbers fused onto the tokens; they are stripped here.)
enum class RegKind {
  Scalar,                // General-purpose / scalar register.
  NeonVector,            // NEON vector register.
  SVEDataVector,         // SVE data vector register.
  SVEPredicateAsCounter, // SVE predicate register used as a counter.
  SVEPredicateVector,    // SVE predicate vector register.
  Matrix,                // Matrix (SME) register.
  LookupTable            // Lookup-table register (see tryParseZTOperand).
};
76
/// MatrixKind - How a matrix register operand is addressed: the whole array,
/// a tile, or a single row/column slice.
enum class MatrixKind { Array, Tile, Row, Col };
78
/// RegConstraintEqualityTy - How a parsed register must relate to the
/// register class the matcher expects: exactly equal, or related via a
/// super-/sub-register relationship (used for operands such as GPR64as32 /
/// GPR32as64 where a register of a different width is accepted).
/// Kept as an unscoped enum: existing code qualifies the enumerators as
/// RegConstraintEqualityTy::EqualsReg, which unscoped enums permit since
/// C++11, while others may use them unqualified.
enum RegConstraintEqualityTy {
  EqualsReg,      // Parsed register must match exactly.
  EqualsSuperReg, // Parsed register relates to the expected one as a
                  // super-register.
  EqualsSubReg    // Parsed register relates to the expected one as a
                  // sub-register.
};
84
// NOTE(review): this listing is a doxygen/HTML scrape -- each code line
// carries its original source line number fused to the text, and several
// lines (e.g. 90, 154, 167, 181-182, 232, 235-236, 279, 301-305, 322-328)
// were dropped by the extraction, leaving truncated declarations below.
// Code bytes are kept as-is; only comments are added.
//
/// Target-specific assembly parser for AArch64: parses mnemonics, operands
/// and directives, and feeds the TableGen-generated instruction matcher.
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registers via the .req directive.
// NOTE(review): the declaration of the alias map itself (original line 90)
// was lost in extraction.
91
 // Tracks a MOVPRFX instruction seen immediately before the current one so
 // validateInstruction() can check the following instruction against the
 // prefix's destination/predicate registers and element size.
92 class PrefixInfo {
93 public:
 // Decode Inst into a PrefixInfo. For the predicated MOVPRFX forms the
 // governing predicate register and the destructive element size (taken
 // from TSFlags) are also recorded; any non-MOVPRFX opcode yields an
 // inactive PrefixInfo.
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
 // Unpredicated form: only the destination register matters.
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
 // Merging predicated form: predicate is operand 2 (operand 1 is the
 // merged-in destination tied to operand 0).
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
 // Zeroing predicated form: predicate is operand 1.
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
 // Only meaningful for the predicated MOVPRFX forms.
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 unsigned getDstReg() const { return Dst; }
 // Only meaningful for the predicated MOVPRFX forms.
140 unsigned getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
 // ElementSize/Dst/Pg are only set when the corresponding flags above are
 // true; they are deliberately left uninitialized otherwise.
148 unsigned ElementSize;
149 unsigned Dst;
150 unsigned Pg;
151 } NextPrefix;
152
 // NOTE(review): original line 154 (which presumably fetched the target
 // streamer into a local `TS`) was lost in extraction; `TS` below is
 // otherwise undeclared here.
153 AArch64TargetStreamer &getTargetStreamer() {
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
 /// Location of the current token, used for diagnostics.
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
 // Parsers for SYS/SYSP alias mnemonics and miscellaneous operand forms.
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
 // NOTE(review): the trailing parameter(s) of showMatchError (original line
 // 181) were lost in extraction; the declaration below is truncated.
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
 // Directive handlers, one per supported assembler directive.
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFIBKeyFrame();
199 bool parseDirectiveCFIMTETaggedFrame();
200
201 bool parseDirectiveVariantPCS(SMLoc L);
202
 // Windows SEH unwind directive handlers (.seh_*).
203 bool parseDirectiveSEHAllocStack(SMLoc L);
204 bool parseDirectiveSEHPrologEnd(SMLoc L);
205 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
206 bool parseDirectiveSEHSaveFPLR(SMLoc L);
207 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
208 bool parseDirectiveSEHSaveReg(SMLoc L);
209 bool parseDirectiveSEHSaveRegX(SMLoc L);
210 bool parseDirectiveSEHSaveRegP(SMLoc L);
211 bool parseDirectiveSEHSaveRegPX(SMLoc L);
212 bool parseDirectiveSEHSaveLRPair(SMLoc L);
213 bool parseDirectiveSEHSaveFReg(SMLoc L);
214 bool parseDirectiveSEHSaveFRegX(SMLoc L);
215 bool parseDirectiveSEHSaveFRegP(SMLoc L);
216 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
217 bool parseDirectiveSEHSetFP(SMLoc L);
218 bool parseDirectiveSEHAddFP(SMLoc L);
219 bool parseDirectiveSEHNop(SMLoc L);
220 bool parseDirectiveSEHSaveNext(SMLoc L);
221 bool parseDirectiveSEHEpilogStart(SMLoc L);
222 bool parseDirectiveSEHEpilogEnd(SMLoc L);
223 bool parseDirectiveSEHTrapFrame(SMLoc L);
224 bool parseDirectiveSEHMachineFrame(SMLoc L);
225 bool parseDirectiveSEHContext(SMLoc L);
226 bool parseDirectiveSEHECContext(SMLoc L);
227 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
228 bool parseDirectiveSEHPACSignLR(SMLoc L);
229 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
230
 // NOTE(review): lines 232 and 235-236 (remaining parameters of
 // validateInstruction and MatchAndEmitInstruction) were lost in
 // extraction; both declarations below are truncated.
231 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
233 unsigned getNumRegsForRegKind(RegKind K);
234 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
237 bool MatchingInlineAsm) override;
238/// @name Auto-generated Match Functions
239/// {
240
241#define GET_ASSEMBLER_HEADER
242#include "AArch64GenAsmMatcher.inc"
243
244 /// }
245
 // Custom operand parsers referenced by the generated matcher; each
 // attempts one operand form and reports success/failure via ParseStatus.
246 ParseStatus tryParseScalarRegister(MCRegister &Reg);
247 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
248 RegKind MatchKind);
249 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
250 ParseStatus tryParseSVCR(OperandVector &Operands);
251 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
252 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
253 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
254 ParseStatus tryParseSysReg(OperandVector &Operands);
255 ParseStatus tryParseSysCROperand(OperandVector &Operands);
256 template <bool IsSVEPrefetch = false>
257 ParseStatus tryParsePrefetch(OperandVector &Operands);
258 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
259 ParseStatus tryParsePSBHint(OperandVector &Operands);
260 ParseStatus tryParseBTIHint(OperandVector &Operands);
261 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
262 ParseStatus tryParseAdrLabel(OperandVector &Operands);
263 template <bool AddFPZeroAsLiteral>
264 ParseStatus tryParseFPImm(OperandVector &Operands);
265 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
266 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
267 bool tryParseNeonVectorRegister(OperandVector &Operands);
268 ParseStatus tryParseVectorIndex(OperandVector &Operands);
269 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
270 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
271 template <bool ParseShiftExtend,
272 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
273 ParseStatus tryParseGPROperand(OperandVector &Operands);
274 ParseStatus tryParseZTOperand(OperandVector &Operands);
275 template <bool ParseShiftExtend, bool ParseSuffix>
276 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
277 template <RegKind RK>
278 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
 // NOTE(review): the return type / template header of the declaration below
 // (original line 279) was lost in extraction.
280 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
281 template <RegKind VectorKind>
282 ParseStatus tryParseVectorList(OperandVector &Operands,
283 bool ExpectMatch = false);
284 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
285 ParseStatus tryParseSVEPattern(OperandVector &Operands);
286 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
287 ParseStatus tryParseGPR64x8(OperandVector &Operands);
288 ParseStatus tryParseImmRange(OperandVector &Operands);
289
290public:
 // Target-specific match result codes, extended by the generated
 // GET_OPERAND_DIAGNOSTIC_TYPES list.
291 enum AArch64MatchResultTy {
292 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
293#define GET_OPERAND_DIAGNOSTIC_TYPES
294#include "AArch64GenAsmMatcher.inc"
295 };
296 bool IsILP32;
297
 // Constructor: registers directive aliases and computes the initial
 // available-feature set from the subtarget. NOTE(review): original lines
 // 301-303 and 305 (presumably IsILP32 setup, obtaining the streamer `S`,
 // and creating the target streamer) were lost in extraction, which is why
 // `S` below appears undeclared and the `if` has no visible body.
298 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
299 const MCInstrInfo &MII, const MCTargetOptions &Options)
300 : MCTargetAsmParser(Options, STI, MII) {
304 if (S.getTargetStreamer() == nullptr)
306
307 // Alias .hword/.word/.[dx]word to the target-independent
308 // .2byte/.4byte/.8byte directives as they have the same form and
309 // semantics:
310 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
311 Parser.addAliasForDirective(".hword", ".2byte");
312 Parser.addAliasForDirective(".word", ".4byte");
313 Parser.addAliasForDirective(".dword", ".8byte");
314 Parser.addAliasForDirective(".xword", ".8byte");
315
316 // Initialize the set of available features.
317 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
318 }
319
 // MCTargetAsmParser overrides. NOTE(review): lines 322, 325 and 328 (the
 // leading parts of three override declarations) were lost in extraction,
 // leaving the orphaned parameter lines below.
320 bool areEqualRegs(const MCParsedAsmOperand &Op1,
321 const MCParsedAsmOperand &Op2) const override;
323 SMLoc NameLoc, OperandVector &Operands) override;
324 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
326 SMLoc &EndLoc) override;
327 bool ParseDirective(AsmToken DirectiveID) override;
329 unsigned Kind) override;
330
331 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
332
 /// Classify a symbolic expression into its ELF/Darwin reference kind plus
 /// constant addend; returns false if the expression is not understood.
333 static bool classifySymbolRef(const MCExpr *Expr,
334 AArch64MCExpr::VariantKind &ELFRefKind,
335 MCSymbolRefExpr::VariantKind &DarwinRefKind,
336 int64_t &Addend);
337};
338
339/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
340/// instruction.
341class AArch64Operand : public MCParsedAsmOperand {
342private:
343 enum KindTy {
344 k_Immediate,
345 k_ShiftedImm,
346 k_ImmRange,
347 k_CondCode,
348 k_Register,
349 k_MatrixRegister,
350 k_MatrixTileList,
351 k_SVCR,
352 k_VectorList,
353 k_VectorIndex,
354 k_Token,
355 k_SysReg,
356 k_SysCR,
357 k_Prefetch,
358 k_ShiftExtend,
359 k_FPImm,
360 k_Barrier,
361 k_PSBHint,
362 k_BTIHint,
363 } Kind;
364
365 SMLoc StartLoc, EndLoc;
366
367 struct TokOp {
368 const char *Data;
369 unsigned Length;
370 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
371 };
372
373 // Separate shift/extend operand.
374 struct ShiftExtendOp {
376 unsigned Amount;
377 bool HasExplicitAmount;
378 };
379
380 struct RegOp {
381 unsigned RegNum;
382 RegKind Kind;
383 int ElementWidth;
384
385 // The register may be allowed as a different register class,
386 // e.g. for GPR64as32 or GPR32as64.
387 RegConstraintEqualityTy EqualityTy;
388
389 // In some cases the shift/extend needs to be explicitly parsed together
390 // with the register, rather than as a separate operand. This is needed
391 // for addressing modes where the instruction as a whole dictates the
392 // scaling/extend, rather than specific bits in the instruction.
393 // By parsing them as a single operand, we avoid the need to pass an
394 // extra operand in all CodeGen patterns (because all operands need to
395 // have an associated value), and we avoid the need to update TableGen to
396 // accept operands that have no associated bits in the instruction.
397 //
398 // An added benefit of parsing them together is that the assembler
399 // can give a sensible diagnostic if the scaling is not correct.
400 //
401 // The default is 'lsl #0' (HasExplicitAmount = false) if no
402 // ShiftExtend is specified.
403 ShiftExtendOp ShiftExtend;
404 };
405
406 struct MatrixRegOp {
407 unsigned RegNum;
408 unsigned ElementWidth;
409 MatrixKind Kind;
410 };
411
412 struct MatrixTileListOp {
413 unsigned RegMask = 0;
414 };
415
416 struct VectorListOp {
417 unsigned RegNum;
418 unsigned Count;
419 unsigned Stride;
420 unsigned NumElements;
421 unsigned ElementWidth;
422 RegKind RegisterKind;
423 };
424
425 struct VectorIndexOp {
426 int Val;
427 };
428
429 struct ImmOp {
430 const MCExpr *Val;
431 };
432
433 struct ShiftedImmOp {
434 const MCExpr *Val;
435 unsigned ShiftAmount;
436 };
437
438 struct ImmRangeOp {
439 unsigned First;
440 unsigned Last;
441 };
442
443 struct CondCodeOp {
445 };
446
447 struct FPImmOp {
448 uint64_t Val; // APFloat value bitcasted to uint64_t.
449 bool IsExact; // describes whether parsed value was exact.
450 };
451
452 struct BarrierOp {
453 const char *Data;
454 unsigned Length;
455 unsigned Val; // Not the enum since not all values have names.
456 bool HasnXSModifier;
457 };
458
459 struct SysRegOp {
460 const char *Data;
461 unsigned Length;
462 uint32_t MRSReg;
463 uint32_t MSRReg;
464 uint32_t PStateField;
465 };
466
467 struct SysCRImmOp {
468 unsigned Val;
469 };
470
471 struct PrefetchOp {
472 const char *Data;
473 unsigned Length;
474 unsigned Val;
475 };
476
477 struct PSBHintOp {
478 const char *Data;
479 unsigned Length;
480 unsigned Val;
481 };
482
483 struct BTIHintOp {
484 const char *Data;
485 unsigned Length;
486 unsigned Val;
487 };
488
489 struct SVCROp {
490 const char *Data;
491 unsigned Length;
492 unsigned PStateField;
493 };
494
495 union {
496 struct TokOp Tok;
497 struct RegOp Reg;
498 struct MatrixRegOp MatrixReg;
499 struct MatrixTileListOp MatrixTileList;
500 struct VectorListOp VectorList;
501 struct VectorIndexOp VectorIndex;
502 struct ImmOp Imm;
503 struct ShiftedImmOp ShiftedImm;
504 struct ImmRangeOp ImmRange;
505 struct CondCodeOp CondCode;
506 struct FPImmOp FPImm;
507 struct BarrierOp Barrier;
508 struct SysRegOp SysReg;
509 struct SysCRImmOp SysCRImm;
510 struct PrefetchOp Prefetch;
511 struct PSBHintOp PSBHint;
512 struct BTIHintOp BTIHint;
513 struct ShiftExtendOp ShiftExtend;
514 struct SVCROp SVCR;
515 };
516
517 // Keep the MCContext around as the MCExprs may need manipulated during
518 // the add<>Operands() calls.
519 MCContext &Ctx;
520
521public:
522 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
523
524 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
525 Kind = o.Kind;
526 StartLoc = o.StartLoc;
527 EndLoc = o.EndLoc;
528 switch (Kind) {
529 case k_Token:
530 Tok = o.Tok;
531 break;
532 case k_Immediate:
533 Imm = o.Imm;
534 break;
535 case k_ShiftedImm:
536 ShiftedImm = o.ShiftedImm;
537 break;
538 case k_ImmRange:
539 ImmRange = o.ImmRange;
540 break;
541 case k_CondCode:
542 CondCode = o.CondCode;
543 break;
544 case k_FPImm:
545 FPImm = o.FPImm;
546 break;
547 case k_Barrier:
548 Barrier = o.Barrier;
549 break;
550 case k_Register:
551 Reg = o.Reg;
552 break;
553 case k_MatrixRegister:
554 MatrixReg = o.MatrixReg;
555 break;
556 case k_MatrixTileList:
557 MatrixTileList = o.MatrixTileList;
558 break;
559 case k_VectorList:
560 VectorList = o.VectorList;
561 break;
562 case k_VectorIndex:
563 VectorIndex = o.VectorIndex;
564 break;
565 case k_SysReg:
566 SysReg = o.SysReg;
567 break;
568 case k_SysCR:
569 SysCRImm = o.SysCRImm;
570 break;
571 case k_Prefetch:
572 Prefetch = o.Prefetch;
573 break;
574 case k_PSBHint:
575 PSBHint = o.PSBHint;
576 break;
577 case k_BTIHint:
578 BTIHint = o.BTIHint;
579 break;
580 case k_ShiftExtend:
581 ShiftExtend = o.ShiftExtend;
582 break;
583 case k_SVCR:
584 SVCR = o.SVCR;
585 break;
586 }
587 }
588
589 /// getStartLoc - Get the location of the first token of this operand.
590 SMLoc getStartLoc() const override { return StartLoc; }
591 /// getEndLoc - Get the location of the last token of this operand.
592 SMLoc getEndLoc() const override { return EndLoc; }
593
594 StringRef getToken() const {
595 assert(Kind == k_Token && "Invalid access!");
596 return StringRef(Tok.Data, Tok.Length);
597 }
598
599 bool isTokenSuffix() const {
600 assert(Kind == k_Token && "Invalid access!");
601 return Tok.IsSuffix;
602 }
603
604 const MCExpr *getImm() const {
605 assert(Kind == k_Immediate && "Invalid access!");
606 return Imm.Val;
607 }
608
609 const MCExpr *getShiftedImmVal() const {
610 assert(Kind == k_ShiftedImm && "Invalid access!");
611 return ShiftedImm.Val;
612 }
613
614 unsigned getShiftedImmShift() const {
615 assert(Kind == k_ShiftedImm && "Invalid access!");
616 return ShiftedImm.ShiftAmount;
617 }
618
619 unsigned getFirstImmVal() const {
620 assert(Kind == k_ImmRange && "Invalid access!");
621 return ImmRange.First;
622 }
623
624 unsigned getLastImmVal() const {
625 assert(Kind == k_ImmRange && "Invalid access!");
626 return ImmRange.Last;
627 }
628
630 assert(Kind == k_CondCode && "Invalid access!");
631 return CondCode.Code;
632 }
633
634 APFloat getFPImm() const {
635 assert (Kind == k_FPImm && "Invalid access!");
636 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
637 }
638
639 bool getFPImmIsExact() const {
640 assert (Kind == k_FPImm && "Invalid access!");
641 return FPImm.IsExact;
642 }
643
644 unsigned getBarrier() const {
645 assert(Kind == k_Barrier && "Invalid access!");
646 return Barrier.Val;
647 }
648
649 StringRef getBarrierName() const {
650 assert(Kind == k_Barrier && "Invalid access!");
651 return StringRef(Barrier.Data, Barrier.Length);
652 }
653
654 bool getBarriernXSModifier() const {
655 assert(Kind == k_Barrier && "Invalid access!");
656 return Barrier.HasnXSModifier;
657 }
658
659 MCRegister getReg() const override {
660 assert(Kind == k_Register && "Invalid access!");
661 return Reg.RegNum;
662 }
663
664 unsigned getMatrixReg() const {
665 assert(Kind == k_MatrixRegister && "Invalid access!");
666 return MatrixReg.RegNum;
667 }
668
669 unsigned getMatrixElementWidth() const {
670 assert(Kind == k_MatrixRegister && "Invalid access!");
671 return MatrixReg.ElementWidth;
672 }
673
674 MatrixKind getMatrixKind() const {
675 assert(Kind == k_MatrixRegister && "Invalid access!");
676 return MatrixReg.Kind;
677 }
678
679 unsigned getMatrixTileListRegMask() const {
680 assert(isMatrixTileList() && "Invalid access!");
681 return MatrixTileList.RegMask;
682 }
683
684 RegConstraintEqualityTy getRegEqualityTy() const {
685 assert(Kind == k_Register && "Invalid access!");
686 return Reg.EqualityTy;
687 }
688
689 unsigned getVectorListStart() const {
690 assert(Kind == k_VectorList && "Invalid access!");
691 return VectorList.RegNum;
692 }
693
694 unsigned getVectorListCount() const {
695 assert(Kind == k_VectorList && "Invalid access!");
696 return VectorList.Count;
697 }
698
699 unsigned getVectorListStride() const {
700 assert(Kind == k_VectorList && "Invalid access!");
701 return VectorList.Stride;
702 }
703
704 int getVectorIndex() const {
705 assert(Kind == k_VectorIndex && "Invalid access!");
706 return VectorIndex.Val;
707 }
708
709 StringRef getSysReg() const {
710 assert(Kind == k_SysReg && "Invalid access!");
711 return StringRef(SysReg.Data, SysReg.Length);
712 }
713
714 unsigned getSysCR() const {
715 assert(Kind == k_SysCR && "Invalid access!");
716 return SysCRImm.Val;
717 }
718
719 unsigned getPrefetch() const {
720 assert(Kind == k_Prefetch && "Invalid access!");
721 return Prefetch.Val;
722 }
723
724 unsigned getPSBHint() const {
725 assert(Kind == k_PSBHint && "Invalid access!");
726 return PSBHint.Val;
727 }
728
729 StringRef getPSBHintName() const {
730 assert(Kind == k_PSBHint && "Invalid access!");
731 return StringRef(PSBHint.Data, PSBHint.Length);
732 }
733
734 unsigned getBTIHint() const {
735 assert(Kind == k_BTIHint && "Invalid access!");
736 return BTIHint.Val;
737 }
738
739 StringRef getBTIHintName() const {
740 assert(Kind == k_BTIHint && "Invalid access!");
741 return StringRef(BTIHint.Data, BTIHint.Length);
742 }
743
744 StringRef getSVCR() const {
745 assert(Kind == k_SVCR && "Invalid access!");
746 return StringRef(SVCR.Data, SVCR.Length);
747 }
748
749 StringRef getPrefetchName() const {
750 assert(Kind == k_Prefetch && "Invalid access!");
751 return StringRef(Prefetch.Data, Prefetch.Length);
752 }
753
754 AArch64_AM::ShiftExtendType getShiftExtendType() const {
755 if (Kind == k_ShiftExtend)
756 return ShiftExtend.Type;
757 if (Kind == k_Register)
758 return Reg.ShiftExtend.Type;
759 llvm_unreachable("Invalid access!");
760 }
761
762 unsigned getShiftExtendAmount() const {
763 if (Kind == k_ShiftExtend)
764 return ShiftExtend.Amount;
765 if (Kind == k_Register)
766 return Reg.ShiftExtend.Amount;
767 llvm_unreachable("Invalid access!");
768 }
769
770 bool hasShiftExtendAmount() const {
771 if (Kind == k_ShiftExtend)
772 return ShiftExtend.HasExplicitAmount;
773 if (Kind == k_Register)
774 return Reg.ShiftExtend.HasExplicitAmount;
775 llvm_unreachable("Invalid access!");
776 }
777
778 bool isImm() const override { return Kind == k_Immediate; }
779 bool isMem() const override { return false; }
780
781 bool isUImm6() const {
782 if (!isImm())
783 return false;
784 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
785 if (!MCE)
786 return false;
787 int64_t Val = MCE->getValue();
788 return (Val >= 0 && Val < 64);
789 }
790
791 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
792
793 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
794 return isImmScaled<Bits, Scale>(true);
795 }
796
797 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
798 DiagnosticPredicate isUImmScaled() const {
799 if (IsRange && isImmRange() &&
800 (getLastImmVal() != getFirstImmVal() + Offset))
801 return DiagnosticPredicateTy::NoMatch;
802
803 return isImmScaled<Bits, Scale, IsRange>(false);
804 }
805
806 template <int Bits, int Scale, bool IsRange = false>
807 DiagnosticPredicate isImmScaled(bool Signed) const {
808 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
809 (isImmRange() && !IsRange))
810 return DiagnosticPredicateTy::NoMatch;
811
812 int64_t Val;
813 if (isImmRange())
814 Val = getFirstImmVal();
815 else {
816 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
817 if (!MCE)
818 return DiagnosticPredicateTy::NoMatch;
819 Val = MCE->getValue();
820 }
821
822 int64_t MinVal, MaxVal;
823 if (Signed) {
824 int64_t Shift = Bits - 1;
825 MinVal = (int64_t(1) << Shift) * -Scale;
826 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
827 } else {
828 MinVal = 0;
829 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
830 }
831
832 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
833 return DiagnosticPredicateTy::Match;
834
835 return DiagnosticPredicateTy::NearMatch;
836 }
837
838 DiagnosticPredicate isSVEPattern() const {
839 if (!isImm())
840 return DiagnosticPredicateTy::NoMatch;
841 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
842 if (!MCE)
843 return DiagnosticPredicateTy::NoMatch;
844 int64_t Val = MCE->getValue();
845 if (Val >= 0 && Val < 32)
846 return DiagnosticPredicateTy::Match;
847 return DiagnosticPredicateTy::NearMatch;
848 }
849
850 DiagnosticPredicate isSVEVecLenSpecifier() const {
851 if (!isImm())
852 return DiagnosticPredicateTy::NoMatch;
853 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
854 if (!MCE)
855 return DiagnosticPredicateTy::NoMatch;
856 int64_t Val = MCE->getValue();
857 if (Val >= 0 && Val <= 1)
858 return DiagnosticPredicateTy::Match;
859 return DiagnosticPredicateTy::NearMatch;
860 }
861
862 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
864 MCSymbolRefExpr::VariantKind DarwinRefKind;
865 int64_t Addend;
866 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
867 Addend)) {
868 // If we don't understand the expression, assume the best and
869 // let the fixup and relocation code deal with it.
870 return true;
871 }
872
873 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
874 ELFRefKind == AArch64MCExpr::VK_LO12 ||
875 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
876 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
877 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
878 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
879 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
881 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
882 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
883 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
884 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
885 // Note that we don't range-check the addend. It's adjusted modulo page
886 // size when converted, so there is no "out of range" condition when using
887 // @pageoff.
888 return true;
889 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
890 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
891 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
892 return Addend == 0;
893 }
894
895 return false;
896 }
897
898 template <int Scale> bool isUImm12Offset() const {
899 if (!isImm())
900 return false;
901
902 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
903 if (!MCE)
904 return isSymbolicUImm12Offset(getImm());
905
906 int64_t Val = MCE->getValue();
907 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
908 }
909
910 template <int N, int M>
911 bool isImmInRange() const {
912 if (!isImm())
913 return false;
914 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
915 if (!MCE)
916 return false;
917 int64_t Val = MCE->getValue();
918 return (Val >= N && Val <= M);
919 }
920
921 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
922 // a logical immediate can always be represented when inverted.
923 template <typename T>
924 bool isLogicalImm() const {
925 if (!isImm())
926 return false;
927 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
928 if (!MCE)
929 return false;
930
931 int64_t Val = MCE->getValue();
932 // Avoid left shift by 64 directly.
933 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
934 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
935 if ((Val & Upper) && (Val & Upper) != Upper)
936 return false;
937
938 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
939 }
940
941 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
942
943 bool isImmRange() const { return Kind == k_ImmRange; }
944
945 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
946 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
947 /// immediate that can be shifted by 'Shift'.
948 template <unsigned Width>
949 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
950 if (isShiftedImm() && Width == getShiftedImmShift())
951 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
952 return std::make_pair(CE->getValue(), Width);
953
954 if (isImm())
955 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
956 int64_t Val = CE->getValue();
957 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
958 return std::make_pair(Val >> Width, Width);
959 else
960 return std::make_pair(Val, 0u);
961 }
962
963 return {};
964 }
965
966 bool isAddSubImm() const {
967 if (!isShiftedImm() && !isImm())
968 return false;
969
970 const MCExpr *Expr;
971
972 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
973 if (isShiftedImm()) {
974 unsigned Shift = ShiftedImm.ShiftAmount;
975 Expr = ShiftedImm.Val;
976 if (Shift != 0 && Shift != 12)
977 return false;
978 } else {
979 Expr = getImm();
980 }
981
983 MCSymbolRefExpr::VariantKind DarwinRefKind;
984 int64_t Addend;
985 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
986 DarwinRefKind, Addend)) {
987 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
988 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
989 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
990 || ELFRefKind == AArch64MCExpr::VK_LO12
991 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
992 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
993 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
994 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
995 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
996 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
997 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
998 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
999 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1000 }
1001
1002 // If it's a constant, it should be a real immediate in range.
1003 if (auto ShiftedVal = getShiftedVal<12>())
1004 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1005
1006 // If it's an expression, we hope for the best and let the fixup/relocation
1007 // code deal with it.
1008 return true;
1009 }
1010
1011 bool isAddSubImmNeg() const {
1012 if (!isShiftedImm() && !isImm())
1013 return false;
1014
1015 // Otherwise it should be a real negative immediate in range.
1016 if (auto ShiftedVal = getShiftedVal<12>())
1017 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1018
1019 return false;
1020 }
1021
1022 // Signed value in the range -128 to +127. For element widths of
1023 // 16 bits or higher it may also be a signed multiple of 256 in the
1024 // range -32768 to +32512.
1025 // For element-width of 8 bits a range of -128 to 255 is accepted,
1026 // since a copy of a byte can be either signed/unsigned.
1027 template <typename T>
1028 DiagnosticPredicate isSVECpyImm() const {
1029 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1030 return DiagnosticPredicateTy::NoMatch;
1031
1032 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1033 std::is_same<int8_t, T>::value;
1034 if (auto ShiftedImm = getShiftedVal<8>())
1035 if (!(IsByte && ShiftedImm->second) &&
1036 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1037 << ShiftedImm->second))
1038 return DiagnosticPredicateTy::Match;
1039
1040 return DiagnosticPredicateTy::NearMatch;
1041 }
1042
1043 // Unsigned value in the range 0 to 255. For element widths of
1044 // 16 bits or higher it may also be a signed multiple of 256 in the
1045 // range 0 to 65280.
1046 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1047 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1048 return DiagnosticPredicateTy::NoMatch;
1049
1050 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1051 std::is_same<int8_t, T>::value;
1052 if (auto ShiftedImm = getShiftedVal<8>())
1053 if (!(IsByte && ShiftedImm->second) &&
1054 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1055 << ShiftedImm->second))
1056 return DiagnosticPredicateTy::Match;
1057
1058 return DiagnosticPredicateTy::NearMatch;
1059 }
1060
1061 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1062 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1063 return DiagnosticPredicateTy::Match;
1064 return DiagnosticPredicateTy::NoMatch;
1065 }
1066
1067 bool isCondCode() const { return Kind == k_CondCode; }
1068
// True when the operand is a constant expressible as an AdvSIMD modified
// immediate of type 10 (the 64-bit per-byte-mask MOVI encoding).
// NOTE: the scraped listing dropped original line 1075 (the final return),
// which left the function falling off the end; restored from upstream LLVM.
1069 bool isSIMDImmType10() const {
1070 if (!isImm())
1071 return false;
1072 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1073 if (!MCE)
1074 return false;
1075 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1076 }
1077
// True when the operand may serve as an N-bit, word-aligned branch target.
// Non-constant expressions (labels) are accepted; fixups resolve them later.
1078 template<int N>
1079 bool isBranchTarget() const {
1080 if (!isImm())
1081 return false;
1082 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1083 if (!MCE)
1084 return true;
1085 int64_t Val = MCE->getValue();
// Branch offsets must be 4-byte aligned.
1086 if (Val & 0x3)
1087 return false;
1088 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
// Signed N-bit range, scaled by the 4-byte instruction size.
1089 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1090 }
1091
// True when the operand is a symbol reference whose ELF modifier is one of
// AllowedModifiers (e.g. :abs_g1:); Darwin-style modifiers are rejected.
1092 bool
1093 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1094 if (!isImm())
1095 return false;
1096
1097 AArch64MCExpr::VariantKind ELFRefKind;
1098 MCSymbolRefExpr::VariantKind DarwinRefKind;
1099 int64_t Addend;
1100 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1101 DarwinRefKind, Addend)) {
1102 return false;
1103 }
// MOVW-with-modifier forms are ELF-only.
1104 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1105 return false;
1106
1107 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1108 }
1109
// Per-halfword MOVZ/MOVK symbol-modifier predicates (G3 = bits 63:48 ...
// G0 = bits 15:0). NOTE: the scraped listing dropped the hyperlinked
// modifier-list lines (originals 1111, 1116-1119, 1124-1128, 1133-1137);
// restored from upstream LLVM.
1110 bool isMovWSymbolG3() const {
1111 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1112 }
1113
1114 bool isMovWSymbolG2() const {
1115 return isMovWSymbol(
1116 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1117 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1118 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1119 AArch64MCExpr::VK_DTPREL_G2});
1120 }
1121
1122 bool isMovWSymbolG1() const {
1123 return isMovWSymbol(
1124 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1125 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1126 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1127 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1128 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1129 }
1130
1131 bool isMovWSymbolG0() const {
1132 return isMovWSymbol(
1133 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1134 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1135 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1136 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1137 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1138 }
1139
// True when the immediate can be materialized by a MOVZ alias of MOV with
// the given register width and shift.
1140 template<int RegWidth, int Shift>
1141 bool isMOVZMovAlias() const {
1142 if (!isImm()) return false;
1143
1144 const MCExpr *E = getImm();
1145 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1146 uint64_t Value = CE->getValue();
1147
1148 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1149 }
1150 // Only supports the case of Shift being 0 if an expression is used as an
1151 // operand
1152 return !Shift && E;
1153 }
1154
// True when the immediate can be materialized by a MOVN alias of MOV;
// unlike the MOVZ form, only constant expressions are accepted.
1155 template<int RegWidth, int Shift>
1156 bool isMOVNMovAlias() const {
1157 if (!isImm()) return false;
1158
1159 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1160 if (!CE) return false;
1161 uint64_t Value = CE->getValue();
1162
1163 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1164 }
1165
// FP immediate encodable in the 8-bit FMOV immediate field.
1166 bool isFPImm() const {
1167 return Kind == k_FPImm &&
1168 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1169 }
1170
// Barrier operand without / with the nXS qualifier (FEAT_XS).
1171 bool isBarrier() const {
1172 return Kind == k_Barrier && !getBarriernXSModifier();
1173 }
1174 bool isBarriernXS() const {
1175 return Kind == k_Barrier && getBarriernXSModifier();
1176 }
1177 bool isSysReg() const { return Kind == k_SysReg; }
1178
// System registers readable by MRS / writable by MSR, respectively;
// -1U marks "not valid for this direction".
1179 bool isMRSSystemRegister() const {
1180 if (!isSysReg()) return false;
1181
1182 return SysReg.MRSReg != -1U;
1183 }
1184
1185 bool isMSRSystemRegister() const {
1186 if (!isSysReg()) return false;
1187 return SysReg.MSRReg != -1U;
1188 }
1189
// PSTATE fields taking a 1-bit or 4-bit immediate (MSR pstate-field forms).
1190 bool isSystemPStateFieldWithImm0_1() const {
1191 if (!isSysReg()) return false;
1192 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1193 }
1194
1195 bool isSystemPStateFieldWithImm0_15() const {
1196 if (!isSysReg())
1197 return false;
1198 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1199 }
1200
// SME SVCR operand (SMSTART/SMSTOP target).
1201 bool isSVCR() const {
1202 if (Kind != k_SVCR)
1203 return false;
1204 return SVCR.PStateField != -1U;
1205 }
1206
1207 bool isReg() const override {
1208 return Kind == k_Register;
1209 }
1210
1211 bool isVectorList() const { return Kind == k_VectorList; }
1212
// Scalar (general-purpose / FP) register.
1213 bool isScalarReg() const {
1214 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1215 }
1216
1217 bool isNeonVectorReg() const {
1218 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1219 }
1220
// NEON register restricted to the low half of the register file
// (V0-V15), as required by some indexed-element instructions.
1221 bool isNeonVectorRegLo() const {
1222 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1223 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1224 Reg.RegNum) ||
1225 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1226 Reg.RegNum));
1227 }
1228
// NEON register restricted to V0-V7.
1229 bool isNeonVectorReg0to7() const {
1230 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1231 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1232 Reg.RegNum));
1233 }
1234
1235 bool isMatrix() const { return Kind == k_MatrixRegister; }
1236 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1237
// True when this is a predicate-as-counter (PNR) register belonging to the
// given register class.
1238 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1239 RegKind RK;
1240 switch (Class) {
1241 case AArch64::PPRRegClassID:
1242 case AArch64::PPR_3bRegClassID:
1243 case AArch64::PPR_p8to15RegClassID:
1244 case AArch64::PNRRegClassID:
1245 case AArch64::PNR_p8to15RegClassID:
1246 case AArch64::PPRorPNRRegClassID:
1247 RK = RegKind::SVEPredicateAsCounter;
1248 break;
1249 default:
1250 llvm_unreachable("Unsupport register class");
1251 }
1252
1253 return (Kind == k_Register && Reg.Kind == RK) &&
1254 AArch64MCRegisterClasses[Class].contains(getReg());
1255 }
1256
// True when this is an SVE data (Z) or predicate (P) register of the given
// class; the class ID determines which register kind is expected.
1257 template <unsigned Class> bool isSVEVectorReg() const {
1258 RegKind RK;
1259 switch (Class) {
1260 case AArch64::ZPRRegClassID:
1261 case AArch64::ZPR_3bRegClassID:
1262 case AArch64::ZPR_4bRegClassID:
1263 RK = RegKind::SVEDataVector;
1264 break;
1265 case AArch64::PPRRegClassID:
1266 case AArch64::PPR_3bRegClassID:
1267 case AArch64::PPR_p8to15RegClassID:
1268 case AArch64::PNRRegClassID:
1269 case AArch64::PNR_p8to15RegClassID:
1270 case AArch64::PPRorPNRRegClassID:
1271 RK = RegKind::SVEPredicateVector;
1272 break;
1273 default:
1274 llvm_unreachable("Unsupport register class");
1275 }
1276
1277 return (Kind == k_Register && Reg.Kind == RK) &&
1278 AArch64MCRegisterClasses[Class].contains(getReg());
1279 }
1280
// FP scalar register written with Z-register syntax (e.g. "z0" for "d0").
1281 template <unsigned Class> bool isFPRasZPR() const {
1282 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1283 AArch64MCRegisterClasses[Class].contains(getReg());
1284 }
1285
// SVE predicate register with a specific element width suffix (.b/.h/.s/.d).
1286 template <int ElementWidth, unsigned Class>
1287 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1288 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1289 return DiagnosticPredicateTy::NoMatch;
1290
1291 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1292 return DiagnosticPredicateTy::Match;
1293
// Right register kind but wrong class/width: drive a targeted diagnostic.
1294 return DiagnosticPredicateTy::NearMatch;
1295 }
1296
// Accepts either a predicate or a predicate-as-counter register of the
// requested element width.
1297 template <int ElementWidth, unsigned Class>
1298 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1299 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1300 Reg.Kind != RegKind::SVEPredicateVector))
1301 return DiagnosticPredicateTy::NoMatch;
1302
1303 if ((isSVEPredicateAsCounterReg<Class>() ||
1304 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1305 Reg.ElementWidth == ElementWidth)
1306 return DiagnosticPredicateTy::Match;
1307
1308 return DiagnosticPredicateTy::NearMatch;
1309 }
1310
// Predicate-as-counter (PNR) register with a specific element width.
1311 template <int ElementWidth, unsigned Class>
1312 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1313 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1314 return DiagnosticPredicateTy::NoMatch;
1315
1316 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1317 return DiagnosticPredicateTy::Match;
1318
1319 return DiagnosticPredicateTy::NearMatch;
1320 }
1321
// SVE data (Z) register with a specific element width suffix.
1322 template <int ElementWidth, unsigned Class>
1323 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1324 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1325 return DiagnosticPredicateTy::NoMatch;
1326
1327 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1328 return DiagnosticPredicateTy::Match;
1329
1330 return DiagnosticPredicateTy::NearMatch;
1331 }
1332
// SVE data register used as a scaled/extended address operand; checks both
// the extend type and that the shift amount matches the element size.
1333 template <int ElementWidth, unsigned Class,
1334 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1335 bool ShiftWidthAlwaysSame>
1336 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1337 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1338 if (!VectorMatch.isMatch())
1339 return DiagnosticPredicateTy::NoMatch;
1340
1341 // Give a more specific diagnostic when the user has explicitly typed in
1342 // a shift-amount that does not match what is expected, but for which
1343 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1344 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1345 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1346 ShiftExtendTy == AArch64_AM::SXTW) &&
1347 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1348 return DiagnosticPredicateTy::NoMatch;
1349
1350 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1351 return DiagnosticPredicateTy::Match;
1352
1353 return DiagnosticPredicateTy::NearMatch;
1354 }
1355
// 64-bit GPR written where a 32-bit register is encoded (and vice versa);
// the add*Operands methods below perform the actual remapping.
1356 bool isGPR32as64() const {
1357 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1358 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1359 }
1360
1361 bool isGPR64as32() const {
1362 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1363 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1364 }
1365
// Consecutive 8-register GPR64 tuple (used by e.g. SYSP/RCWCAS forms).
1366 bool isGPR64x8() const {
1367 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1368 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1369 Reg.RegNum);
1370 }
1371
// Even/odd register pairs for CASP-style instructions.
1372 bool isWSeqPair() const {
1373 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1374 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1375 Reg.RegNum);
1376 }
1377
1378 bool isXSeqPair() const {
1379 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1380 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1381 Reg.RegNum);
1382 }
1383
// XZR used as the register-pair operand of SYSP.
1384 bool isSyspXzrPair() const {
1385 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1386 }
1387
// Rotation immediate for complex-number instructions: must be a multiple of
// Angle with the given Remainder, and at most 270 degrees.
1388 template<int64_t Angle, int64_t Remainder>
1389 DiagnosticPredicate isComplexRotation() const {
1390 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1391
1392 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1393 if (!CE) return DiagnosticPredicateTy::NoMatch;
1394 uint64_t Value = CE->getValue();
1395
1396 if (Value % Angle == Remainder && Value <= 270)
1397 return DiagnosticPredicateTy::Match;
1398 return DiagnosticPredicateTy::NearMatch;
1399 }
1400
// Scalar 64-bit GPR in the given register class.
1401 template <unsigned RegClassID> bool isGPR64() const {
1402 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1403 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1404 }
1405
// 64-bit GPR with an LSL whose amount matches the access size (ExtWidth
// bits), as used in scaled register offsets.
1406 template <unsigned RegClassID, int ExtWidth>
1407 DiagnosticPredicate isGPR64WithShiftExtend() const {
1408 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1409 return DiagnosticPredicateTy::NoMatch;
1410
1411 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1412 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1413 return DiagnosticPredicateTy::Match;
1414 return DiagnosticPredicateTy::NearMatch;
1415 }
1416
1417 /// Is this a vector list with the type implicit (presumably attached to the
1418 /// instruction itself)?
1419 template <RegKind VectorKind, unsigned NumRegs>
1420 bool isImplicitlyTypedVectorList() const {
1421 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1422 VectorList.NumElements == 0 &&
1423 VectorList.RegisterKind == VectorKind;
1424 }
1425
// Fully-typed vector list: count, kind, element count/width and register
// stride must all agree with the template parameters.
1426 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1427 unsigned ElementWidth, unsigned Stride = 1>
1428 bool isTypedVectorList() const {
1429 if (Kind != k_VectorList)
1430 return false;
1431 if (VectorList.Count != NumRegs)
1432 return false;
1433 if (VectorList.RegisterKind != VectorKind)
1434 return false;
1435 if (VectorList.ElementWidth != ElementWidth)
1436 return false;
1437 if (VectorList.Stride != Stride)
1438 return false;
1439 return VectorList.NumElements == NumElements;
1440 }
1441
// Typed vector list whose first register is additionally aligned to a
// multiple of the list length (SME multi-vector constraint).
1442 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1443 unsigned ElementWidth>
1444 DiagnosticPredicate isTypedVectorListMultiple() const {
1445 bool Res =
1446 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1447 if (!Res)
1448 return DiagnosticPredicateTy::NoMatch;
1449 if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
1450 return DiagnosticPredicateTy::NearMatch;
1451 return DiagnosticPredicateTy::Match;
1452 }
1453
// Strided vector list: the first register must lie in Z0..Z(Stride-1) or
// Z16..Z(16+Stride-1), per the SME2 strided-register encoding.
1454 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1455 unsigned ElementWidth>
1456 DiagnosticPredicate isTypedVectorListStrided() const {
1457 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1458 ElementWidth, Stride>();
1459 if (!Res)
1460 return DiagnosticPredicateTy::NoMatch;
1461 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1462 ((VectorList.RegNum >= AArch64::Z16) &&
1463 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1464 return DiagnosticPredicateTy::Match;
1465 return DiagnosticPredicateTy::NoMatch;
1466 }
1467
// Vector element index constrained to the inclusive range [Min, Max].
1468 template <int Min, int Max>
1469 DiagnosticPredicate isVectorIndex() const {
1470 if (Kind != k_VectorIndex)
1471 return DiagnosticPredicateTy::NoMatch;
1472 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1473 return DiagnosticPredicateTy::Match;
1474 return DiagnosticPredicateTy::NearMatch;
1475 }
1476
1477 bool isToken() const override { return Kind == k_Token; }
1478
1479 bool isTokenEqual(StringRef Str) const {
1480 return Kind == k_Token && getToken() == Str;
1481 }
1482 bool isSysCR() const { return Kind == k_SysCR; }
1483 bool isPrefetch() const { return Kind == k_Prefetch; }
1484 bool isPSBHint() const { return Kind == k_PSBHint; }
1485 bool isBTIHint() const { return Kind == k_BTIHint; }
1486 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// A "shifter" is a shift-extend whose operator is one of the plain shifts.
1487 bool isShifter() const {
1488 if (!isShiftExtend())
1489 return false;
1490
1491 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1492 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1493 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1494 ST == AArch64_AM::MSL);
1495 }
1496
// FP immediate that exactly equals the table entry for ImmEnum (e.g. the
// 0.5/1.0/2.0 immediates of FCADD/FADDA-style instructions).
1497 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1498 if (Kind != k_FPImm)
1499 return DiagnosticPredicateTy::NoMatch;
1500
1501 if (getFPImmIsExact()) {
1502 // Lookup the immediate from table of supported immediates.
1503 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1504 assert(Desc && "Unknown enum value");
1505
1506 // Calculate its FP value.
1507 APFloat RealVal(APFloat::IEEEdouble());
1508 auto StatusOrErr =
1509 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1510 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1511 llvm_unreachable("FP immediate is not exact");
1512
1513 if (getFPImm().bitwiseIsEqual(RealVal))
1514 return DiagnosticPredicateTy::Match;
1515 }
1516
1517 return DiagnosticPredicateTy::NearMatch;
1518 }
1519
// Matches either of two exact-FP-immediate table entries.
1520 template <unsigned ImmA, unsigned ImmB>
1521 DiagnosticPredicate isExactFPImm() const {
1522 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1523 if ((Res = isExactFPImm<ImmA>()))
1524 return DiagnosticPredicateTy::Match;
1525 if ((Res = isExactFPImm<ImmB>()))
1526 return DiagnosticPredicateTy::Match;
1527 return Res;
1528 }
1529
// Extend operand (UXTB..SXTX or LSL) with an amount of at most 4, as used
// by extended-register address and arithmetic forms.
1530 bool isExtend() const {
1531 if (!isShiftExtend())
1532 return false;
1533
1534 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1535 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1536 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1537 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1538 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1539 ET == AArch64_AM::LSL) &&
1540 getShiftExtendAmount() <= 4;
1541 }
1542
1543 bool isExtend64() const {
1544 if (!isExtend())
1545 return false;
1546 // Make sure the extend expects a 32-bit source register.
1547 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1548 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1549 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1550 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1551 }
1552
// Extend forms valid for a 64-bit source register.
1553 bool isExtendLSL64() const {
1554 if (!isExtend())
1555 return false;
1556 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1557 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1558 ET == AArch64_AM::LSL) &&
1559 getShiftExtendAmount() <= 4;
1560 }
1561
// LSL with a 3-bit amount (0-7).
1562 bool isLSLImm3Shift() const {
1563 if (!isShiftExtend())
1564 return false;
1565 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1566 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1567 }
1568
// Register-offset memory extend using a 64-bit index register; the amount
// must be 0 or log2 of the access size in bytes.
1569 template<int Width> bool isMemXExtend() const {
1570 if (!isExtend())
1571 return false;
1572 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1573 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1574 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1575 getShiftExtendAmount() == 0);
1576 }
1577
// Same, but for a 32-bit index register (UXTW/SXTW).
1578 template<int Width> bool isMemWExtend() const {
1579 if (!isExtend())
1580 return false;
1581 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1582 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1583 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1584 getShiftExtendAmount() == 0);
1585 }
1586
1587 template <unsigned width>
1588 bool isArithmeticShifter() const {
1589 if (!isShifter())
1590 return false;
1591
1592 // An arithmetic shifter is LSL, LSR, or ASR.
1593 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1594 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1595 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1596 }
1597
1598 template <unsigned width>
1599 bool isLogicalShifter() const {
1600 if (!isShifter())
1601 return false;
1602
1603 // A logical shifter is LSL, LSR, ASR or ROR.
1604 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1605 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1606 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1607 getShiftExtendAmount() < width;
1608 }
1609
// Shifter for 32-bit MOVZ/MOVN-style immediates.
1610 bool isMovImm32Shifter() const {
1611 if (!isShifter())
1612 return false;
1613
1614 // A 32-bit MOVi shifter is LSL of 0 or 16.
1615 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1616 if (ST != AArch64_AM::LSL)
1617 return false;
1618 uint64_t Val = getShiftExtendAmount();
1619 return (Val == 0 || Val == 16);
1620 }
1621
// Shifter for 64-bit MOVZ/MOVN-style immediates.
1622 bool isMovImm64Shifter() const {
1623 if (!isShifter())
1624 return false;
1625
1626 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1627 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1628 if (ST != AArch64_AM::LSL)
1629 return false;
1630 uint64_t Val = getShiftExtendAmount();
1631 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1632 }
1633
1634 bool isLogicalVecShifter() const {
1635 if (!isShifter())
1636 return false;
1637
1638 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1639 unsigned Shift = getShiftExtendAmount();
1640 return getShiftExtendType() == AArch64_AM::LSL &&
1641 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1642 }
1643
1644 bool isLogicalVecHalfWordShifter() const {
1645 if (!isLogicalVecShifter())
1646 return false;
1647
1648 // A logical vector halfword shifter is a left shift by 0 or 8.
1649 unsigned Shift = getShiftExtendAmount();
1650 return getShiftExtendType() == AArch64_AM::LSL &&
1651 (Shift == 0 || Shift == 8);
1652 }
1653
1654 bool isMoveVecShifter() const {
1655 if (!isShiftExtend())
1656 return false;
1657
1658 // A move vector shifter is an MSL (shift-ones) by 8 or 16.
1659 unsigned Shift = getShiftExtendAmount();
1660 return getShiftExtendType() == AArch64_AM::MSL &&
1661 (Shift == 8 || Shift == 16);
1662 }
1663
1664 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1665 // to LDUR/STUR when the offset is not legal for the former but is for
1666 // the latter. As such, in addition to checking for being a legal unscaled
1667 // address, also check that it is not a legal scaled address. This avoids
1668 // ambiguity in the matcher.
1669 template<int Width>
1670 bool isSImm9OffsetFB() const {
1671 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1672 }
1673
// ADRP label: a constant must be 4 KiB aligned and within +/-2^32 of PC
// (21-bit page offset, scaled by 4096); symbolic labels pass through.
1674 bool isAdrpLabel() const {
1675 // Validation was handled during parsing, so we just verify that
1676 // something didn't go haywire.
1677 if (!isImm())
1678 return false;
1679
1680 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1681 int64_t Val = CE->getValue();
1682 int64_t Min = - (4096 * (1LL << (21 - 1)));
1683 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1684 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1685 }
1686
1687 return true;
1688 }
1689
// ADR label: signed 21-bit byte offset; symbolic labels pass through.
1690 bool isAdrLabel() const {
1691 // Validation was handled during parsing, so we just verify that
1692 // something didn't go haywire.
1693 if (!isImm())
1694 return false;
1695
1696 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1697 int64_t Val = CE->getValue();
1698 int64_t Min = - (1LL << (21 - 1));
1699 int64_t Max = ((1LL << (21 - 1)) - 1);
1700 return Val >= Min && Val <= Max;
1701 }
1702
1703 return true;
1704 }
1705
// SME matrix operand of the given kind (tile/row/col/array), element size
// and register class.
1706 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1707 DiagnosticPredicate isMatrixRegOperand() const {
1708 if (!isMatrix())
1709 return DiagnosticPredicateTy::NoMatch;
1710 if (getMatrixKind() != Kind ||
1711 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1712 EltSize != getMatrixElementWidth())
1713 return DiagnosticPredicateTy::NearMatch;
1714 return DiagnosticPredicateTy::Match;
1715 }
1716
1717 bool isPAuthPCRelLabel16Operand() const {
1718 // PAuth PCRel16 operands are similar to regular branch targets, but only
1719 // negative values are allowed for concrete immediates as signing instr
1720 // should be in a lower address.
1721 if (!isImm())
1722 return false;
1723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1724 if (!MCE)
1725 return true;
1726 int64_t Val = MCE->getValue();
// Must be 4-byte aligned.
1727 if (Val & 0b11)
1728 return false;
// Non-positive, within the signed 16-bit word-scaled range.
1729 return (Val <= 0) && (Val > -(1 << 18));
1730 }
1731
// Append Expr to Inst, folding constants into immediate operands.
// NOTE: the scraped listing dropped original lines 1735 and 1739 (the two
// hyperlinked addOperand calls), leaving empty if/else branches; restored
// from upstream LLVM.
1732 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1733 // Add as immediates when possible. Null MCExpr = 0.
1734 if (!Expr)
1735 Inst.addOperand(MCOperand::createImm(0));
1736 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1737 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1738 else
1739 Inst.addOperand(MCOperand::createExpr(Expr));
1740 }
1741
// Append the parsed register to Inst unchanged.
// NOTE: the scraped listing dropped original line 1744 (the hyperlinked
// addOperand call), leaving the method empty; restored from upstream LLVM.
1742 void addRegOperands(MCInst &Inst, unsigned N) const {
1743 assert(N == 1 && "Invalid number of operands!");
1744 Inst.addOperand(MCOperand::createReg(getReg()));
1745 }
1746
// Append the SME matrix register to Inst.
1747 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1748 assert(N == 1 && "Invalid number of operands!");
1749 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1750 }
1751
// Remap a parsed 64-bit GPR to the encoding-equivalent 32-bit GPR (and the
// converse below) via their shared encoding value.
// NOTE: the scraped listing dropped original lines 1761 and 1773 (the
// hyperlinked addOperand calls); restored from upstream LLVM.
1752 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1753 assert(N == 1 && "Invalid number of operands!");
1754 assert(
1755 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1756
1757 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1758 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1759 RI->getEncodingValue(getReg()));
1760
1761 Inst.addOperand(MCOperand::createReg(Reg));
1762 }
1763
1764 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1765 assert(N == 1 && "Invalid number of operands!");
1766 assert(
1767 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1768
1769 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1770 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1771 RI->getEncodingValue(getReg()));
1772
1773 Inst.addOperand(MCOperand::createReg(Reg));
1774 }
1775
// Remap an FP scalar register (B/H/S/D/Q) to the Z register with the same
// index, for instructions that accept scalar syntax for SVE operands.
1776 template <int Width>
1777 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1778 unsigned Base;
1779 switch (Width) {
1780 case 8: Base = AArch64::B0; break;
1781 case 16: Base = AArch64::H0; break;
1782 case 32: Base = AArch64::S0; break;
1783 case 64: Base = AArch64::D0; break;
1784 case 128: Base = AArch64::Q0; break;
1785 default:
1786 llvm_unreachable("Unsupported width");
1787 }
1788 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1789 }
1790
// Append a predicate register, normalising PN0-PN15 to P0-P15.
// NOTE: the scraped listing dropped original line 1797 (the hyperlinked
// addOperand call); restored from upstream LLVM.
1791 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1792 assert(N == 1 && "Invalid number of operands!");
1793 unsigned Reg = getReg();
1794 // Normalise to PPR
1795 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1796 Reg = Reg - AArch64::PN0 + AArch64::P0;
1797 Inst.addOperand(MCOperand::createReg(Reg));
1798 }
1799
// Append a PN register re-expressed as the P register with the same index.
1800 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1801 assert(N == 1 && "Invalid number of operands!");
1802 Inst.addOperand(
1803 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1804 }
1805
// Append a parsed Q register as the corresponding D register (64-bit view).
1806 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1807 assert(N == 1 && "Invalid number of operands!");
1808 assert(
1809 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1810 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1811 }
1812
// Append 128-bit / restricted-range vector registers unchanged.
// NOTE: the scraped listing dropped original lines 1817, 1822 and 1827 (the
// hyperlinked addOperand calls); restored from upstream LLVM.
1813 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1814 assert(N == 1 && "Invalid number of operands!");
1815 assert(
1816 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1817 Inst.addOperand(MCOperand::createReg(getReg()));
1818 }
1819
1820 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1821 assert(N == 1 && "Invalid number of operands!");
1822 Inst.addOperand(MCOperand::createReg(getReg()));
1823 }
1824
1825 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1826 assert(N == 1 && "Invalid number of operands!");
1827 Inst.addOperand(MCOperand::createReg(getReg()));
1828 }
1829
// Row selector into the FirstRegs table used by addVectorListOperands.
1830 enum VecListIndexType {
1831 VecListIdx_DReg = 0,
1832 VecListIdx_QReg = 1,
1833 VecListIdx_ZReg = 2,
1834 VecListIdx_PReg = 3,
1835 };
1836
// Append a vector list as its tuple pseudo-register. The table maps the
// register bank and list length to the first tuple register; column 0 holds
// the bank's base register used to rebase getVectorListStart().
1838 template <VecListIndexType RegTy, unsigned NumRegs>
1839 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841 static const unsigned FirstRegs[][5] = {
1842 /* DReg */ { AArch64::Q0,
1843 AArch64::D0, AArch64::D0_D1,
1844 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1845 /* QReg */ { AArch64::Q0,
1846 AArch64::Q0, AArch64::Q0_Q1,
1847 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1848 /* ZReg */ { AArch64::Z0,
1849 AArch64::Z0, AArch64::Z0_Z1,
1850 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1851 /* PReg */ { AArch64::P0,
1852 AArch64::P0, AArch64::P0_P1 }
1853 };
1854
1855 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1856 " NumRegs must be <= 4 for ZRegs");
1857
1858 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1859 " NumRegs must be <= 2 for PRegs");
1860
1861 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1862 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1863 FirstRegs[(unsigned)RegTy][0]));
1864 }
1864
// Append an SME2 strided vector list as its strided tuple pseudo-register;
// the start register selects between the Z0- and Z16-based tuple banks.
// NOTE: the scraped listing dropped original lines 1875, 1880, 1888 and
// 1893 (the hyperlinked "Inst.addOperand(MCOperand::createReg(" openers);
// restored from upstream LLVM.
1866 template <unsigned NumRegs>
1867 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1868 assert(N == 1 && "Invalid number of operands!");
1869 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1870
1871 switch (NumRegs) {
1872 case 2:
1873 if (getVectorListStart() < AArch64::Z16) {
1874 assert((getVectorListStart() < AArch64::Z8) &&
1875 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1876 Inst.addOperand(MCOperand::createReg(
1877 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1878 } else {
1879 assert((getVectorListStart() < AArch64::Z24) &&
1880 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1881 Inst.addOperand(MCOperand::createReg(
1882 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1883 }
1884 break;
1885 case 4:
1886 if (getVectorListStart() < AArch64::Z16) {
1887 assert((getVectorListStart() < AArch64::Z4) &&
1888 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1889 Inst.addOperand(MCOperand::createReg(
1890 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1891 } else {
1892 assert((getVectorListStart() < AArch64::Z20) &&
1893 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1894 Inst.addOperand(MCOperand::createReg(
1895 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1896 }
1897 break;
1898 default:
1899 llvm_unreachable("Unsupported number of registers for strided vec list");
1900 }
1901 }
1901
// Append the SME tile list as its 8-bit register mask immediate.
1902 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1903 assert(N == 1 && "Invalid number of operands!");
1904 unsigned RegMask = getMatrixTileListRegMask();
1905 assert(RegMask <= 0xFF && "Invalid mask!");
1906 Inst.addOperand(MCOperand::createImm(RegMask));
1907 }
1908
// Append the vector element index as an immediate.
1909 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1910 assert(N == 1 && "Invalid number of operands!");
1911 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1912 }
1913
// Encode a two-choice exact FP immediate as 0/1 selecting ImmIs0 or ImmIs1.
1914 template <unsigned ImmIs0, unsigned ImmIs1>
1915 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1916 assert(N == 1 && "Invalid number of operands!");
1917 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1918 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1919 }
1920
// Append the immediate expression via addExpr (constants become Imm ops).
1921 void addImmOperands(MCInst &Inst, unsigned N) const {
1922 assert(N == 1 && "Invalid number of operands!");
1923 // If this is a pageoff symrefexpr with an addend, adjust the addend
1924 // to be only the page-offset portion. Otherwise, just add the expr
1925 // as-is.
1926 addExpr(Inst, getImm());
1927 }
1928
// Append an immediate together with its optional shift as two operands.
// NOTE: the scraped listing dropped original line 1940 (the hyperlinked
// addOperand call emitting the zero shift in the final branch); restored
// from upstream LLVM.
1929 template <int Shift>
1930 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1931 assert(N == 2 && "Invalid number of operands!");
1932 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1933 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1934 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1935 } else if (isShiftedImm()) {
1936 addExpr(Inst, getShiftedImmVal());
1937 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1938 } else {
1939 addExpr(Inst, getImm());
1940 Inst.addOperand(MCOperand::createImm(0));
1941 }
1942 }
1943
// Append the negated immediate plus shift (for SUB-as-ADD style aliases).
1944 template <int Shift>
1945 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1946 assert(N == 2 && "Invalid number of operands!");
1947 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1948 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1949 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1950 } else
1951 llvm_unreachable("Not a shifted negative immediate");
1952 }
1953
// Append the condition code as an immediate.
// NOTE: the scraped listing dropped original line 1956 (the hyperlinked
// addOperand call), leaving the method empty; restored from upstream LLVM.
1954 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1955 assert(N == 1 && "Invalid number of operands!");
1956 Inst.addOperand(MCOperand::createImm(getCondCode()));
1957 }
1958
// ADRP: constant offsets are emitted in 4 KiB pages (>> 12); labels as-is.
1959 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1960 assert(N == 1 && "Invalid number of operands!");
1961 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1962 if (!MCE)
1963 addExpr(Inst, getImm());
1964 else
1965 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1966 }
1967
// ADR uses the plain byte offset.
1968 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1969 addImmOperands(Inst, N);
1970 }
1971
// Scaled 12-bit unsigned offset: constants are divided by the access size.
1972 template<int Scale>
1973 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1974 assert(N == 1 && "Invalid number of operands!");
1975 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1976
1977 if (!MCE) {
1978 Inst.addOperand(MCOperand::createExpr(getImm()));
1979 return;
1980 }
1981 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1982 }
1983
// Append an unsigned 6-bit immediate (validated by the matcher predicate).
// NOTE: the scraped listing dropped original line 1987 (the hyperlinked
// addOperand call); restored from upstream LLVM.
1984 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1985 assert(N == 1 && "Invalid number of operands!");
1986 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1987 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1988 }
1989
// Append a constant immediate divided by Scale.
1990 template <int Scale>
1991 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 1 && "Invalid number of operands!");
1993 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1994 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1995 }
1996
// Same, but for range-style operands using the first value of the range.
1997 template <int Scale>
1998 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1999 assert(N == 1 && "Invalid number of operands!");
2000 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2001 }
2002
// Encode a logical (bitmask) immediate for the element type T.
2003 template <typename T>
2004 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!");
2006 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2007 std::make_unsigned_t<T> Val = MCE->getValue();
2008 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2009 Inst.addOperand(MCOperand::createImm(encoding));
2010 }
2011
// As above but encodes the bitwise complement (BIC-style aliases).
2012 template <typename T>
2013 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2014 assert(N == 1 && "Invalid number of operands!");
2015 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2016 std::make_unsigned_t<T> Val = ~MCE->getValue();
2017 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2018 Inst.addOperand(MCOperand::createImm(encoding));
2019 }
2020
 // Presumably the AdvSIMD "type 10" modified-immediate encoding (FMOV-style
 // byte mask) — confirm against the elided encoding line.
2021 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2022 assert(N == 1 && "Invalid number of operands!");
2023 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
 // NOTE(review): file line 2024 (computation of 'encoding' from MCE) is
 // elided in this rendering.
2025 Inst.addOperand(MCOperand::createImm(encoding));
2026 }
2027
2028 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2029 // Branch operands don't encode the low bits, so shift them off
2030 // here. If it's a label, however, just put it on directly as there's
2031 // not enough information now to do anything.
2032 assert(N == 1 && "Invalid number of operands!");
2033 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2034 if (!MCE) {
2035 addExpr(Inst, getImm());
2036 return;
2037 }
2038 assert(MCE && "Invalid constant immediate operand!");
2039 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2040 }
2041
2042 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2043 // PC-relative operands don't encode the low bits, so shift them off
2044 // here. If it's a label, however, just put it on directly as there's
2045 // not enough information now to do anything.
2046 assert(N == 1 && "Invalid number of operands!");
2047 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2048 if (!MCE) {
2049 addExpr(Inst, getImm());
2050 return;
2051 }
2052 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2053 }
2054
2055 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2056 // Branch operands don't encode the low bits, so shift them off
2057 // here. If it's a label, however, just put it on directly as there's
2058 // not enough information now to do anything.
2059 assert(N == 1 && "Invalid number of operands!");
2060 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2061 if (!MCE) {
2062 addExpr(Inst, getImm());
2063 return;
2064 }
2065 assert(MCE && "Invalid constant immediate operand!");
2066 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2067 }
2068
2069 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2070 // Branch operands don't encode the low bits, so shift them off
2071 // here. If it's a label, however, just put it on directly as there's
2072 // not enough information now to do anything.
2073 assert(N == 1 && "Invalid number of operands!");
2074 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2075 if (!MCE) {
2076 addExpr(Inst, getImm());
2077 return;
2078 }
2079 assert(MCE && "Invalid constant immediate operand!");
2080 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2081 }
2082
 // Adds the 8-bit AArch64 FP immediate encoding of the stored value.
2083 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2084 assert(N == 1 && "Invalid number of operands!");
 // NOTE(review): file line 2085 (the start of the Inst.addOperand call) is
 // elided here; the visible continuation encodes via AArch64_AM::getFP64Imm.
2086 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2087 }
2088
 // Barrier option immediate (DSB/DMB/ISB operand).
2089 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 Inst.addOperand(MCOperand::createImm(getBarrier()));
2092 }
2093
 // Same payload as addBarrierOperands; the nXS flavour is distinguished by
 // the operand's HasnXSModifier flag elsewhere, not by the value added here.
2094 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2095 assert(N == 1 && "Invalid number of operands!");
2096 Inst.addOperand(MCOperand::createImm(getBarrier()));
2097 }
2098
 // System-register encoding as read by MRS.
2099 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2100 assert(N == 1 && "Invalid number of operands!");
2101
2102 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2103 }
2104
 // System-register encoding as written by MSR.
2105 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2106 assert(N == 1 && "Invalid number of operands!");
2107
2108 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2109 }
2110
 // MSR-immediate pstate field (variant accepting a 0/1 immediate).
2111 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2112 assert(N == 1 && "Invalid number of operands!");
2113
2114 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2115 }
2116
 // SME streaming-mode control (SVCR) pstate field.
2117 void addSVCROperands(MCInst &Inst, unsigned N) const {
2118 assert(N == 1 && "Invalid number of operands!");
2119
2120 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2121 }
2122
 // MSR-immediate pstate field (variant accepting a 0-15 immediate).
2123 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2124 assert(N == 1 && "Invalid number of operands!");
2125
2126 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2127 }
2128
 // SYS/SYSL "Cn" control-register index.
2129 void addSysCROperands(MCInst &Inst, unsigned N) const {
2130 assert(N == 1 && "Invalid number of operands!");
2131 Inst.addOperand(MCOperand::createImm(getSysCR()));
2132 }
2133
 // PRFM prefetch-operation immediate.
2134 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2135 assert(N == 1 && "Invalid number of operands!");
2136 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2137 }
2138
 // PSB hint immediate.
2139 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2142 }
2143
 // BTI hint immediate (value biased at creation; see CreateBTIHint).
2144 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2147 }
2148
 // Shifter operand: packs shift type + amount via AArch64_AM::getShifterImm.
2149 void addShifterOperands(MCInst &Inst, unsigned N) const {
2150 assert(N == 1 && "Invalid number of operands!");
2151 unsigned Imm =
2152 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
 // NOTE(review): file line 2153 (the Inst.addOperand call using Imm) is
 // elided in this rendering.
2154 }
2155
 // LSL #0..7 shifter: only the raw amount is encoded.
2156 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2157 assert(N == 1 && "Invalid number of operands!");
2158 unsigned Imm = getShiftExtendAmount();
 // NOTE(review): file line 2159 (the Inst.addOperand call) is elided here.
2160 }
2161
 // SYSP register pair operand that must resolve to XZR; always adds XZR.
2162 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2163 assert(N == 1 && "Invalid number of operands!");
2164
2165 if (!isScalarReg())
2166 return;
2167
2168 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2169 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
 // NOTE(review): file line 2170 (continuation resolving Reg from the class,
 // presumably .getRegister(...)) is elided in this rendering.
2171 if (Reg != AArch64::XZR)
 // llvm_unreachable here implies earlier matching already rejected any
 // non-XZR register — presumably enforced by the operand predicates; confirm.
2172 llvm_unreachable("wrong register");
2173
2174 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2175 }
2176
 // Arithmetic extend for 32-bit forms; a plain LSL is canonicalized to UXTW.
2177 void addExtendOperands(MCInst &Inst, unsigned N) const {
2178 assert(N == 1 && "Invalid number of operands!");
2179 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2180 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2181 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
 // NOTE(review): file line 2182 (the Inst.addOperand call) is elided here.
2183 }
2184
 // 64-bit variant: LSL canonicalizes to UXTX instead.
2185 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2188 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2189 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
 // NOTE(review): file line 2190 (the Inst.addOperand call) is elided here.
2191 }
2192
2193 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2194 assert(N == 2 && "Invalid number of operands!");
2195 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2196 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2197 Inst.addOperand(MCOperand::createImm(IsSigned));
2198 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2199 }
2200
2201 // For 8-bit load/store instructions with a register offset, both the
2202 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2203 // they're disambiguated by whether the shift was explicit or implicit rather
2204 // than its size.
2205 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2206 assert(N == 2 && "Invalid number of operands!");
2207 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2208 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2209 Inst.addOperand(MCOperand::createImm(IsSigned));
2210 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2211 }
2212
2213 template<int Shift>
2214 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216
2217 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2218 if (CE) {
2219 uint64_t Value = CE->getValue();
2220 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2221 } else {
2222 addExpr(Inst, getImm());
2223 }
2224 }
2225
2226 template<int Shift>
2227 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2228 assert(N == 1 && "Invalid number of operands!");
2229
2230 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2231 uint64_t Value = CE->getValue();
2232 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2233 }
2234
2235 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2238 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2239 }
2240
2241 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2242 assert(N == 1 && "Invalid number of operands!");
2243 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2244 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2245 }
2246
2247 void print(raw_ostream &OS) const override;
2248
2249 static std::unique_ptr<AArch64Operand>
2250 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2251 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2252 Op->Tok.Data = Str.data();
2253 Op->Tok.Length = Str.size();
2254 Op->Tok.IsSuffix = IsSuffix;
2255 Op->StartLoc = S;
2256 Op->EndLoc = S;
2257 return Op;
2258 }
2259
 // Factory for a plain register operand. EqTy records how the register must
 // compare against a tied register (exact / super-reg / sub-reg).
2260 static std::unique_ptr<AArch64Operand>
2261 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2262 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
 // NOTE(review): file line 2263 (another defaulted parameter — presumably the
 // shift/extend type 'ExtTy' used below) is elided in this rendering.
2264 unsigned ShiftAmount = 0,
2265 unsigned HasExplicitAmount = false) {
2266 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2267 Op->Reg.RegNum = RegNum;
2268 Op->Reg.Kind = Kind;
2269 Op->Reg.ElementWidth = 0;
2270 Op->Reg.EqualityTy = EqTy;
2271 Op->Reg.ShiftExtend.Type = ExtTy;
2272 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2273 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2274 Op->StartLoc = S;
2275 Op->EndLoc = E;
2276 return Op;
2277 }
2278
 // Vector-register factory: delegates to CreateReg, then records the lane
 // width (0 when implicit/unknown).
2279 static std::unique_ptr<AArch64Operand>
2280 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2281 SMLoc S, SMLoc E, MCContext &Ctx,
 // NOTE(review): file line 2282 (a defaulted 'ExtTy' parameter) is elided.
2283 unsigned ShiftAmount = 0,
2284 unsigned HasExplicitAmount = false) {
2285 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2286 Kind == RegKind::SVEPredicateVector ||
2287 Kind == RegKind::SVEPredicateAsCounter) &&
2288 "Invalid vector kind");
2289 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2290 HasExplicitAmount);
2291 Op->Reg.ElementWidth = ElementWidth;
2292 return Op;
2293 }
2294
2295 static std::unique_ptr<AArch64Operand>
2296 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2297 unsigned NumElements, unsigned ElementWidth,
2298 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2299 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2300 Op->VectorList.RegNum = RegNum;
2301 Op->VectorList.Count = Count;
2302 Op->VectorList.Stride = Stride;
2303 Op->VectorList.NumElements = NumElements;
2304 Op->VectorList.ElementWidth = ElementWidth;
2305 Op->VectorList.RegisterKind = RegisterKind;
2306 Op->StartLoc = S;
2307 Op->EndLoc = E;
2308 return Op;
2309 }
2310
2311 static std::unique_ptr<AArch64Operand>
2312 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2313 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2314 Op->VectorIndex.Val = Idx;
2315 Op->StartLoc = S;
2316 Op->EndLoc = E;
2317 return Op;
2318 }
2319
2320 static std::unique_ptr<AArch64Operand>
2321 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2322 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2323 Op->MatrixTileList.RegMask = RegMask;
2324 Op->StartLoc = S;
2325 Op->EndLoc = E;
2326 return Op;
2327 }
2328
2329 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2330 const unsigned ElementWidth) {
2331 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2332 RegMap = {
2333 {{0, AArch64::ZAB0},
2334 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2335 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2336 {{8, AArch64::ZAB0},
2337 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2338 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2339 {{16, AArch64::ZAH0},
2340 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2341 {{16, AArch64::ZAH1},
2342 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2343 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2344 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2345 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2346 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2347 };
2348
2349 if (ElementWidth == 64)
2350 OutRegs.insert(Reg);
2351 else {
2352 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2353 assert(!Regs.empty() && "Invalid tile or element width!");
2354 for (auto OutReg : Regs)
2355 OutRegs.insert(OutReg);
2356 }
2357 }
2358
2359 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2360 SMLoc E, MCContext &Ctx) {
2361 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2362 Op->Imm.Val = Val;
2363 Op->StartLoc = S;
2364 Op->EndLoc = E;
2365 return Op;
2366 }
2367
2368 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2369 unsigned ShiftAmount,
2370 SMLoc S, SMLoc E,
2371 MCContext &Ctx) {
2372 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2373 Op->ShiftedImm .Val = Val;
2374 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2375 Op->StartLoc = S;
2376 Op->EndLoc = E;
2377 return Op;
2378 }
2379
2380 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2381 unsigned Last, SMLoc S,
2382 SMLoc E,
2383 MCContext &Ctx) {
2384 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2385 Op->ImmRange.First = First;
2386 Op->ImmRange.Last = Last;
2387 Op->EndLoc = E;
2388 return Op;
2389 }
2390
2391 static std::unique_ptr<AArch64Operand>
2392 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2393 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2394 Op->CondCode.Code = Code;
2395 Op->StartLoc = S;
2396 Op->EndLoc = E;
2397 return Op;
2398 }
2399
2400 static std::unique_ptr<AArch64Operand>
2401 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2403 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2404 Op->FPImm.IsExact = IsExact;
2405 Op->StartLoc = S;
2406 Op->EndLoc = S;
2407 return Op;
2408 }
2409
2410 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2411 StringRef Str,
2412 SMLoc S,
2413 MCContext &Ctx,
2414 bool HasnXSModifier) {
2415 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2416 Op->Barrier.Val = Val;
2417 Op->Barrier.Data = Str.data();
2418 Op->Barrier.Length = Str.size();
2419 Op->Barrier.HasnXSModifier = HasnXSModifier;
2420 Op->StartLoc = S;
2421 Op->EndLoc = S;
2422 return Op;
2423 }
2424
2425 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2426 uint32_t MRSReg,
2427 uint32_t MSRReg,
2428 uint32_t PStateField,
2429 MCContext &Ctx) {
2430 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2431 Op->SysReg.Data = Str.data();
2432 Op->SysReg.Length = Str.size();
2433 Op->SysReg.MRSReg = MRSReg;
2434 Op->SysReg.MSRReg = MSRReg;
2435 Op->SysReg.PStateField = PStateField;
2436 Op->StartLoc = S;
2437 Op->EndLoc = S;
2438 return Op;
2439 }
2440
2441 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2442 SMLoc E, MCContext &Ctx) {
2443 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2444 Op->SysCRImm.Val = Val;
2445 Op->StartLoc = S;
2446 Op->EndLoc = E;
2447 return Op;
2448 }
2449
2450 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2451 StringRef Str,
2452 SMLoc S,
2453 MCContext &Ctx) {
2454 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2455 Op->Prefetch.Val = Val;
2456 Op->Barrier.Data = Str.data();
2457 Op->Barrier.Length = Str.size();
2458 Op->StartLoc = S;
2459 Op->EndLoc = S;
2460 return Op;
2461 }
2462
2463 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2464 StringRef Str,
2465 SMLoc S,
2466 MCContext &Ctx) {
2467 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2468 Op->PSBHint.Val = Val;
2469 Op->PSBHint.Data = Str.data();
2470 Op->PSBHint.Length = Str.size();
2471 Op->StartLoc = S;
2472 Op->EndLoc = S;
2473 return Op;
2474 }
2475
2476 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2477 StringRef Str,
2478 SMLoc S,
2479 MCContext &Ctx) {
2480 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2481 Op->BTIHint.Val = Val | 32;
2482 Op->BTIHint.Data = Str.data();
2483 Op->BTIHint.Length = Str.size();
2484 Op->StartLoc = S;
2485 Op->EndLoc = S;
2486 return Op;
2487 }
2488
2489 static std::unique_ptr<AArch64Operand>
2490 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2491 SMLoc S, SMLoc E, MCContext &Ctx) {
2492 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2493 Op->MatrixReg.RegNum = RegNum;
2494 Op->MatrixReg.ElementWidth = ElementWidth;
2495 Op->MatrixReg.Kind = Kind;
2496 Op->StartLoc = S;
2497 Op->EndLoc = E;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand>
2502 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2504 Op->SVCR.PStateField = PStateField;
2505 Op->SVCR.Data = Str.data();
2506 Op->SVCR.Length = Str.size();
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand>
2513 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2514 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2515 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2516 Op->ShiftExtend.Type = ShOp;
2517 Op->ShiftExtend.Amount = Val;
2518 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2519 Op->StartLoc = S;
2520 Op->EndLoc = E;
2521 return Op;
2522 }
2523};
2524
2525} // end anonymous namespace.
2526
// Debug dump of the operand in a human-readable, angle-bracketed form.
// Output text is load-bearing for debugging only; keep formats stable.
2527void AArch64Operand::print(raw_ostream &OS) const {
2528 switch (Kind) {
2529 case k_FPImm:
2530 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2531 if (!getFPImmIsExact())
2532 OS << " (inexact)";
2533 OS << ">";
2534 break;
2535 case k_Barrier: {
2536 StringRef Name = getBarrierName();
2537 if (!Name.empty())
2538 OS << "<barrier " << Name << ">";
2539 else
2540 OS << "<barrier invalid #" << getBarrier() << ">";
2541 break;
2542 }
2543 case k_Immediate:
2544 OS << *getImm();
2545 break;
2546 case k_ShiftedImm: {
2547 unsigned Shift = getShiftedImmShift();
2548 OS << "<shiftedimm ";
2549 OS << *getShiftedImmVal();
2550 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2551 break;
2552 }
2553 case k_ImmRange: {
2554 OS << "<immrange ";
2555 OS << getFirstImmVal();
2556 OS << ":" << getLastImmVal() << ">";
2557 break;
2558 }
2559 case k_CondCode:
2560 OS << "<condcode " << getCondCode() << ">";
2561 break;
2562 case k_VectorList: {
2563 OS << "<vectorlist ";
2564 unsigned Reg = getVectorListStart();
2565 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2566 OS << Reg + i * getVectorListStride() << " ";
2567 OS << ">";
2568 break;
2569 }
2570 case k_VectorIndex:
2571 OS << "<vectorindex " << getVectorIndex() << ">";
2572 break;
2573 case k_SysReg:
2574 OS << "<sysreg: " << getSysReg() << '>';
2575 break;
2576 case k_Token:
2577 OS << "'" << getToken() << "'";
2578 break;
2579 case k_SysCR:
2580 OS << "c" << getSysCR();
2581 break;
2582 case k_Prefetch: {
2583 StringRef Name = getPrefetchName();
2584 if (!Name.empty())
2585 OS << "<prfop " << Name << ">";
2586 else
2587 OS << "<prfop invalid #" << getPrefetch() << ">";
2588 break;
2589 }
2590 case k_PSBHint:
2591 OS << getPSBHintName();
2592 break;
2593 case k_BTIHint:
2594 OS << getBTIHintName();
2595 break;
2596 case k_MatrixRegister:
2597 OS << "<matrix " << getMatrixReg() << ">";
2598 break;
2599 case k_MatrixTileList: {
 // Prints the 8-bit tile mask MSB-first as a string of 0/1 digits.
2600 OS << "<matrixlist ";
2601 unsigned RegMask = getMatrixTileListRegMask();
2602 unsigned MaxBits = 8;
2603 for (unsigned I = MaxBits; I > 0; --I)
2604 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2605 OS << '>';
2606 break;
2607 }
2608 case k_SVCR: {
2609 OS << getSVCR();
2610 break;
2611 }
2612 case k_Register:
2613 OS << "<register " << getReg() << ">";
 // A register with a shift/extend falls through to also print that part.
2614 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2615 break;
2616 [[fallthrough]];
2617 case k_ShiftExtend:
2618 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2619 << getShiftExtendAmount();
2620 if (!hasShiftExtendAmount())
2621 OS << "<imp>";
2622 OS << '>';
2623 break;
2624 }
2625}
2626
2627/// @name Auto-generated Match Functions
2628/// {
2629
2631
2632/// }
2633
2635 return StringSwitch<unsigned>(Name.lower())
2636 .Case("v0", AArch64::Q0)
2637 .Case("v1", AArch64::Q1)
2638 .Case("v2", AArch64::Q2)
2639 .Case("v3", AArch64::Q3)
2640 .Case("v4", AArch64::Q4)
2641 .Case("v5", AArch64::Q5)
2642 .Case("v6", AArch64::Q6)
2643 .Case("v7", AArch64::Q7)
2644 .Case("v8", AArch64::Q8)
2645 .Case("v9", AArch64::Q9)
2646 .Case("v10", AArch64::Q10)
2647 .Case("v11", AArch64::Q11)
2648 .Case("v12", AArch64::Q12)
2649 .Case("v13", AArch64::Q13)
2650 .Case("v14", AArch64::Q14)
2651 .Case("v15", AArch64::Q15)
2652 .Case("v16", AArch64::Q16)
2653 .Case("v17", AArch64::Q17)
2654 .Case("v18", AArch64::Q18)
2655 .Case("v19", AArch64::Q19)
2656 .Case("v20", AArch64::Q20)
2657 .Case("v21", AArch64::Q21)
2658 .Case("v22", AArch64::Q22)
2659 .Case("v23", AArch64::Q23)
2660 .Case("v24", AArch64::Q24)
2661 .Case("v25", AArch64::Q25)
2662 .Case("v26", AArch64::Q26)
2663 .Case("v27", AArch64::Q27)
2664 .Case("v28", AArch64::Q28)
2665 .Case("v29", AArch64::Q29)
2666 .Case("v30", AArch64::Q30)
2667 .Case("v31", AArch64::Q31)
2668 .Default(0);
2669}
2670
2671/// Returns an optional pair of (#elements, element-width) if Suffix
2672/// is a valid vector kind. Where the number of elements in a vector
2673/// or the vector width is implicit or explicitly unknown (but still a
2674/// valid suffix kind), 0 is used.
2675static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2676 RegKind VectorKind) {
2677 std::pair<int, int> Res = {-1, -1};
2678
2679 switch (VectorKind) {
2680 case RegKind::NeonVector:
 // NOTE(review): file line 2681 (presumably `Res = StringSwitch<...>(
 // Suffix.lower())`) is elided in this rendering.
2682 .Case("", {0, 0})
2683 .Case(".1d", {1, 64})
2684 .Case(".1q", {1, 128})
2685 // '.2h' needed for fp16 scalar pairwise reductions
2686 .Case(".2h", {2, 16})
2687 .Case(".2b", {2, 8})
2688 .Case(".2s", {2, 32})
2689 .Case(".2d", {2, 64})
2690 // '.4b' is another special case for the ARMv8.2a dot product
2691 // operand
2692 .Case(".4b", {4, 8})
2693 .Case(".4h", {4, 16})
2694 .Case(".4s", {4, 32})
2695 .Case(".8b", {8, 8})
2696 .Case(".8h", {8, 16})
2697 .Case(".16b", {16, 8})
2698 // Accept the width neutral ones, too, for verbose syntax. If
2699 // those aren't used in the right places, the token operand won't
2700 // match so all will work out.
2701 .Case(".b", {0, 8})
2702 .Case(".h", {0, 16})
2703 .Case(".s", {0, 32})
2704 .Case(".d", {0, 64})
2705 .Default({-1, -1});
2706 break;
2707 case RegKind::SVEPredicateAsCounter:
2708 case RegKind::SVEPredicateVector:
2709 case RegKind::SVEDataVector:
2710 case RegKind::Matrix:
 // SVE/SME suffixes carry only an element width; the lane count is
 // implementation-defined, hence 0.
 // NOTE(review): file line 2711 (the second StringSwitch assignment) is
 // elided in this rendering.
2712 .Case("", {0, 0})
2713 .Case(".b", {0, 8})
2714 .Case(".h", {0, 16})
2715 .Case(".s", {0, 32})
2716 .Case(".d", {0, 64})
2717 .Case(".q", {0, 128})
2718 .Default({-1, -1});
2719 break;
2720 default:
2721 llvm_unreachable("Unsupported RegKind");
2722 }
2723
 // {-1, -1} is the in-band "no match" marker from both switches.
2724 if (Res == std::make_pair(-1, -1))
2725 return std::nullopt;
2726
2727 return std::optional<std::pair<int, int>>(Res);
2728}
2729
// True iff Suffix is a recognized vector-kind suffix for VectorKind.
2730static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2731 return parseVectorKind(Suffix, VectorKind).has_value();
2732}
2733
2735 return StringSwitch<unsigned>(Name.lower())
2736 .Case("z0", AArch64::Z0)
2737 .Case("z1", AArch64::Z1)
2738 .Case("z2", AArch64::Z2)
2739 .Case("z3", AArch64::Z3)
2740 .Case("z4", AArch64::Z4)
2741 .Case("z5", AArch64::Z5)
2742 .Case("z6", AArch64::Z6)
2743 .Case("z7", AArch64::Z7)
2744 .Case("z8", AArch64::Z8)
2745 .Case("z9", AArch64::Z9)
2746 .Case("z10", AArch64::Z10)
2747 .Case("z11", AArch64::Z11)
2748 .Case("z12", AArch64::Z12)
2749 .Case("z13", AArch64::Z13)
2750 .Case("z14", AArch64::Z14)
2751 .Case("z15", AArch64::Z15)
2752 .Case("z16", AArch64::Z16)
2753 .Case("z17", AArch64::Z17)
2754 .Case("z18", AArch64::Z18)
2755 .Case("z19", AArch64::Z19)
2756 .Case("z20", AArch64::Z20)
2757 .Case("z21", AArch64::Z21)
2758 .Case("z22", AArch64::Z22)
2759 .Case("z23", AArch64::Z23)
2760 .Case("z24", AArch64::Z24)
2761 .Case("z25", AArch64::Z25)
2762 .Case("z26", AArch64::Z26)
2763 .Case("z27", AArch64::Z27)
2764 .Case("z28", AArch64::Z28)
2765 .Case("z29", AArch64::Z29)
2766 .Case("z30", AArch64::Z30)
2767 .Case("z31", AArch64::Z31)
2768 .Default(0);
2769}
2770
2772 return StringSwitch<unsigned>(Name.lower())
2773 .Case("p0", AArch64::P0)
2774 .Case("p1", AArch64::P1)
2775 .Case("p2", AArch64::P2)
2776 .Case("p3", AArch64::P3)
2777 .Case("p4", AArch64::P4)
2778 .Case("p5", AArch64::P5)
2779 .Case("p6", AArch64::P6)
2780 .Case("p7", AArch64::P7)
2781 .Case("p8", AArch64::P8)
2782 .Case("p9", AArch64::P9)
2783 .Case("p10", AArch64::P10)
2784 .Case("p11", AArch64::P11)
2785 .Case("p12", AArch64::P12)
2786 .Case("p13", AArch64::P13)
2787 .Case("p14", AArch64::P14)
2788 .Case("p15", AArch64::P15)
2789 .Default(0);
2790}
2791
2793 return StringSwitch<unsigned>(Name.lower())
2794 .Case("pn0", AArch64::PN0)
2795 .Case("pn1", AArch64::PN1)
2796 .Case("pn2", AArch64::PN2)
2797 .Case("pn3", AArch64::PN3)
2798 .Case("pn4", AArch64::PN4)
2799 .Case("pn5", AArch64::PN5)
2800 .Case("pn6", AArch64::PN6)
2801 .Case("pn7", AArch64::PN7)
2802 .Case("pn8", AArch64::PN8)
2803 .Case("pn9", AArch64::PN9)
2804 .Case("pn10", AArch64::PN10)
2805 .Case("pn11", AArch64::PN11)
2806 .Case("pn12", AArch64::PN12)
2807 .Case("pn13", AArch64::PN13)
2808 .Case("pn14", AArch64::PN14)
2809 .Case("pn15", AArch64::PN15)
2810 .Default(0);
2811}
2812
2814 return StringSwitch<unsigned>(Name.lower())
2815 .Case("za0.d", AArch64::ZAD0)
2816 .Case("za1.d", AArch64::ZAD1)
2817 .Case("za2.d", AArch64::ZAD2)
2818 .Case("za3.d", AArch64::ZAD3)
2819 .Case("za4.d", AArch64::ZAD4)
2820 .Case("za5.d", AArch64::ZAD5)
2821 .Case("za6.d", AArch64::ZAD6)
2822 .Case("za7.d", AArch64::ZAD7)
2823 .Case("za0.s", AArch64::ZAS0)
2824 .Case("za1.s", AArch64::ZAS1)
2825 .Case("za2.s", AArch64::ZAS2)
2826 .Case("za3.s", AArch64::ZAS3)
2827 .Case("za0.h", AArch64::ZAH0)
2828 .Case("za1.h", AArch64::ZAH1)
2829 .Case("za0.b", AArch64::ZAB0)
2830 .Default(0);
2831}
2832
2834 return StringSwitch<unsigned>(Name.lower())
2835 .Case("za", AArch64::ZA)
2836 .Case("za0.q", AArch64::ZAQ0)
2837 .Case("za1.q", AArch64::ZAQ1)
2838 .Case("za2.q", AArch64::ZAQ2)
2839 .Case("za3.q", AArch64::ZAQ3)
2840 .Case("za4.q", AArch64::ZAQ4)
2841 .Case("za5.q", AArch64::ZAQ5)
2842 .Case("za6.q", AArch64::ZAQ6)
2843 .Case("za7.q", AArch64::ZAQ7)
2844 .Case("za8.q", AArch64::ZAQ8)
2845 .Case("za9.q", AArch64::ZAQ9)
2846 .Case("za10.q", AArch64::ZAQ10)
2847 .Case("za11.q", AArch64::ZAQ11)
2848 .Case("za12.q", AArch64::ZAQ12)
2849 .Case("za13.q", AArch64::ZAQ13)
2850 .Case("za14.q", AArch64::ZAQ14)
2851 .Case("za15.q", AArch64::ZAQ15)
2852 .Case("za0.d", AArch64::ZAD0)
2853 .Case("za1.d", AArch64::ZAD1)
2854 .Case("za2.d", AArch64::ZAD2)
2855 .Case("za3.d", AArch64::ZAD3)
2856 .Case("za4.d", AArch64::ZAD4)
2857 .Case("za5.d", AArch64::ZAD5)
2858 .Case("za6.d", AArch64::ZAD6)
2859 .Case("za7.d", AArch64::ZAD7)
2860 .Case("za0.s", AArch64::ZAS0)
2861 .Case("za1.s", AArch64::ZAS1)
2862 .Case("za2.s", AArch64::ZAS2)
2863 .Case("za3.s", AArch64::ZAS3)
2864 .Case("za0.h", AArch64::ZAH0)
2865 .Case("za1.h", AArch64::ZAH1)
2866 .Case("za0.b", AArch64::ZAB0)
2867 .Case("za0h.q", AArch64::ZAQ0)
2868 .Case("za1h.q", AArch64::ZAQ1)
2869 .Case("za2h.q", AArch64::ZAQ2)
2870 .Case("za3h.q", AArch64::ZAQ3)
2871 .Case("za4h.q", AArch64::ZAQ4)
2872 .Case("za5h.q", AArch64::ZAQ5)
2873 .Case("za6h.q", AArch64::ZAQ6)
2874 .Case("za7h.q", AArch64::ZAQ7)
2875 .Case("za8h.q", AArch64::ZAQ8)
2876 .Case("za9h.q", AArch64::ZAQ9)
2877 .Case("za10h.q", AArch64::ZAQ10)
2878 .Case("za11h.q", AArch64::ZAQ11)
2879 .Case("za12h.q", AArch64::ZAQ12)
2880 .Case("za13h.q", AArch64::ZAQ13)
2881 .Case("za14h.q", AArch64::ZAQ14)
2882 .Case("za15h.q", AArch64::ZAQ15)
2883 .Case("za0h.d", AArch64::ZAD0)
2884 .Case("za1h.d", AArch64::ZAD1)
2885 .Case("za2h.d", AArch64::ZAD2)
2886 .Case("za3h.d", AArch64::ZAD3)
2887 .Case("za4h.d", AArch64::ZAD4)
2888 .Case("za5h.d", AArch64::ZAD5)
2889 .Case("za6h.d", AArch64::ZAD6)
2890 .Case("za7h.d", AArch64::ZAD7)
2891 .Case("za0h.s", AArch64::ZAS0)
2892 .Case("za1h.s", AArch64::ZAS1)
2893 .Case("za2h.s", AArch64::ZAS2)
2894 .Case("za3h.s", AArch64::ZAS3)
2895 .Case("za0h.h", AArch64::ZAH0)
2896 .Case("za1h.h", AArch64::ZAH1)
2897 .Case("za0h.b", AArch64::ZAB0)
2898 .Case("za0v.q", AArch64::ZAQ0)
2899 .Case("za1v.q", AArch64::ZAQ1)
2900 .Case("za2v.q", AArch64::ZAQ2)
2901 .Case("za3v.q", AArch64::ZAQ3)
2902 .Case("za4v.q", AArch64::ZAQ4)
2903 .Case("za5v.q", AArch64::ZAQ5)
2904 .Case("za6v.q", AArch64::ZAQ6)
2905 .Case("za7v.q", AArch64::ZAQ7)
2906 .Case("za8v.q", AArch64::ZAQ8)
2907 .Case("za9v.q", AArch64::ZAQ9)
2908 .Case("za10v.q", AArch64::ZAQ10)
2909 .Case("za11v.q", AArch64::ZAQ11)
2910 .Case("za12v.q", AArch64::ZAQ12)
2911 .Case("za13v.q", AArch64::ZAQ13)
2912 .Case("za14v.q", AArch64::ZAQ14)
2913 .Case("za15v.q", AArch64::ZAQ15)
2914 .Case("za0v.d", AArch64::ZAD0)
2915 .Case("za1v.d", AArch64::ZAD1)
2916 .Case("za2v.d", AArch64::ZAD2)
2917 .Case("za3v.d", AArch64::ZAD3)
2918 .Case("za4v.d", AArch64::ZAD4)
2919 .Case("za5v.d", AArch64::ZAD5)
2920 .Case("za6v.d", AArch64::ZAD6)
2921 .Case("za7v.d", AArch64::ZAD7)
2922 .Case("za0v.s", AArch64::ZAS0)
2923 .Case("za1v.s", AArch64::ZAS1)
2924 .Case("za2v.s", AArch64::ZAS2)
2925 .Case("za3v.s", AArch64::ZAS3)
2926 .Case("za0v.h", AArch64::ZAH0)
2927 .Case("za1v.h", AArch64::ZAH1)
2928 .Case("za0v.b", AArch64::ZAB0)
2929 .Default(0);
2930}
2931
// MCTargetAsmParser hook: hard-failure wrapper around tryParseRegister
// (returns true on failure, per the parseRegister contract).
2932bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2933 SMLoc &EndLoc) {
2934 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2935}
2936
// Non-fatal register parse: records the source range of the token tried.
2937ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2938 SMLoc &EndLoc) {
2939 StartLoc = getLoc();
2940 ParseStatus Res = tryParseScalarRegister(Reg);
 // getLoc() now points past the (possibly consumed) token; back up one
 // character so EndLoc is inclusive.
2941 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2942 return Res;
2943}
2944
2945 // Matches a register name or register alias previously defined by '.req'.
 // Returns 0 unless the name matches a register of exactly the requested
 // Kind; each matcher family is tried in turn.
2946unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2947 RegKind Kind) {
2948 unsigned RegNum = 0;
2949 if ((RegNum = matchSVEDataVectorRegName(Name)))
2950 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2951
2952 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2953 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2954
 // NOTE(review): file line 2955 (the condition, presumably
 // `if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))`) is elided.
2956 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2957
2958 if ((RegNum = MatchNeonVectorRegName(Name)))
2959 return Kind == RegKind::NeonVector ? RegNum : 0;
2960
2961 if ((RegNum = matchMatrixRegName(Name)))
2962 return Kind == RegKind::Matrix ? RegNum : 0;
2963
2964 if (Name.equals_insensitive("zt0"))
2965 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2966
2967 // The parsed register must be of RegKind Scalar
2968 if ((RegNum = MatchRegisterName(Name)))
2969 return (Kind == RegKind::Scalar) ? RegNum : 0;
2970
2971 if (!RegNum) {
2972 // Handle a few common aliases of registers.
2973 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2974 .Case("fp", AArch64::FP)
2975 .Case("lr", AArch64::LR)
2976 .Case("x31", AArch64::XZR)
2977 .Case("w31", AArch64::WZR)
2978 .Default(0))
2979 return Kind == RegKind::Scalar ? RegNum : 0;
2980
2981 // Check for aliases registered via .req. Canonicalize to lower case.
2982 // That's more consistent since register names are case insensitive, and
2983 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2984 auto Entry = RegisterReqs.find(Name.lower());
2985 if (Entry == RegisterReqs.end())
2986 return 0;
2987
2988 // set RegNum if the match is the right kind of register
2989 if (Kind == Entry->getValue().first)
2990 RegNum = Entry->getValue().second;
2991 }
2992 return RegNum;
2993}
2994
2995unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2996 switch (K) {
2997 case RegKind::Scalar:
2998 case RegKind::NeonVector:
2999 case RegKind::SVEDataVector:
3000 return 32;
3001 case RegKind::Matrix:
3002 case RegKind::SVEPredicateVector:
3003 case RegKind::SVEPredicateAsCounter:
3004 return 16;
3005 case RegKind::LookupTable:
3006 return 1;
3007 }
3008 llvm_unreachable("Unsupported RegKind");
3009}
3010
3011/// tryParseScalarRegister - Try to parse a register name. The token must be an
3012/// Identifier when called, and if it is a register name the token is eaten and
3013/// the register is added to the operand list.
3014ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3015 const AsmToken &Tok = getTok();
3016 if (Tok.isNot(AsmToken::Identifier))
3017 return ParseStatus::NoMatch;
3018
3019 std::string lowerCase = Tok.getString().lower();
3020 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3021 if (Reg == 0)
3022 return ParseStatus::NoMatch;
3023
3024 RegNum = Reg;
3025 Lex(); // Eat identifier token.
3026 return ParseStatus::Success;
3027}
3028
3029/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3030ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3031 SMLoc S = getLoc();
3032
3033 if (getTok().isNot(AsmToken::Identifier))
3034 return Error(S, "Expected cN operand where 0 <= N <= 15");
3035
3036 StringRef Tok = getTok().getIdentifier();
3037 if (Tok[0] != 'c' && Tok[0] != 'C')
3038 return Error(S, "Expected cN operand where 0 <= N <= 15");
3039
3040 uint32_t CRNum;
3041 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3042 if (BadNum || CRNum > 15)
3043 return Error(S, "Expected cN operand where 0 <= N <= 15");
3044
3045 Lex(); // Eat identifier token.
3046 Operands.push_back(
3047 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3048 return ParseStatus::Success;
3049}
3050
3051// Either an identifier for named values or a 6-bit immediate.
3052ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3053 SMLoc S = getLoc();
3054 const AsmToken &Tok = getTok();
3055
3056 unsigned MaxVal = 63;
3057
3058 // Immediate case, with optional leading hash:
3059 if (parseOptionalToken(AsmToken::Hash) ||
3060 Tok.is(AsmToken::Integer)) {
3061 const MCExpr *ImmVal;
3062 if (getParser().parseExpression(ImmVal))
3063 return ParseStatus::Failure;
3064
3065 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3066 if (!MCE)
3067 return TokError("immediate value expected for prefetch operand");
3068 unsigned prfop = MCE->getValue();
3069 if (prfop > MaxVal)
3070 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3071 "] expected");
3072
3073 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3074 Operands.push_back(AArch64Operand::CreatePrefetch(
3075 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3076 return ParseStatus::Success;
3077 }
3078
3079 if (Tok.isNot(AsmToken::Identifier))
3080 return TokError("prefetch hint expected");
3081
3082 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3083 if (!RPRFM)
3084 return TokError("prefetch hint expected");
3085
3086 Operands.push_back(AArch64Operand::CreatePrefetch(
3087 RPRFM->Encoding, Tok.getString(), S, getContext()));
3088 Lex(); // Eat identifier token.
3089 return ParseStatus::Success;
3090}
3091
3092/// tryParsePrefetch - Try to parse a prefetch operand.
3093template <bool IsSVEPrefetch>
3094ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3095 SMLoc S = getLoc();
3096 const AsmToken &Tok = getTok();
3097
3098 auto LookupByName = [](StringRef N) {
3099 if (IsSVEPrefetch) {
3100 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3101 return std::optional<unsigned>(Res->Encoding);
3102 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3103 return std::optional<unsigned>(Res->Encoding);
3104 return std::optional<unsigned>();
3105 };
3106
3107 auto LookupByEncoding = [](unsigned E) {
3108 if (IsSVEPrefetch) {
3109 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3110 return std::optional<StringRef>(Res->Name);
3111 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3112 return std::optional<StringRef>(Res->Name);
3113 return std::optional<StringRef>();
3114 };
3115 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3116
3117 // Either an identifier for named values or a 5-bit immediate.
3118 // Eat optional hash.
3119 if (parseOptionalToken(AsmToken::Hash) ||
3120 Tok.is(AsmToken::Integer)) {
3121 const MCExpr *ImmVal;
3122 if (getParser().parseExpression(ImmVal))
3123 return ParseStatus::Failure;
3124
3125 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3126 if (!MCE)
3127 return TokError("immediate value expected for prefetch operand");
3128 unsigned prfop = MCE->getValue();
3129 if (prfop > MaxVal)
3130 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3131 "] expected");
3132
3133 auto PRFM = LookupByEncoding(MCE->getValue());
3134 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3135 S, getContext()));
3136 return ParseStatus::Success;
3137 }
3138
3139 if (Tok.isNot(AsmToken::Identifier))
3140 return TokError("prefetch hint expected");
3141
3142 auto PRFM = LookupByName(Tok.getString());
3143 if (!PRFM)
3144 return TokError("prefetch hint expected");
3145
3146 Operands.push_back(AArch64Operand::CreatePrefetch(
3147 *PRFM, Tok.getString(), S, getContext()));
3148 Lex(); // Eat identifier token.
3149 return ParseStatus::Success;
3150}
3151
3152/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3153ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3154 SMLoc S = getLoc();
3155 const AsmToken &Tok = getTok();
3156 if (Tok.isNot(AsmToken::Identifier))
3157 return TokError("invalid operand for instruction");
3158
3159 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3160 if (!PSB)
3161 return TokError("invalid operand for instruction");
3162
3163 Operands.push_back(AArch64Operand::CreatePSBHint(
3164 PSB->Encoding, Tok.getString(), S, getContext()));
3165 Lex(); // Eat identifier token.
3166 return ParseStatus::Success;
3167}
3168
3169ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3170 SMLoc StartLoc = getLoc();
3171
3172 MCRegister RegNum;
3173
3174 // The case where xzr, xzr is not present is handled by an InstAlias.
3175
3176 auto RegTok = getTok(); // in case we need to backtrack
3177 if (!tryParseScalarRegister(RegNum).isSuccess())
3178 return ParseStatus::NoMatch;
3179
3180 if (RegNum != AArch64::XZR) {
3181 getLexer().UnLex(RegTok);
3182 return ParseStatus::NoMatch;
3183 }
3184
3185 if (parseComma())
3186 return ParseStatus::Failure;
3187
3188 if (!tryParseScalarRegister(RegNum).isSuccess())
3189 return TokError("expected register operand");
3190
3191 if (RegNum != AArch64::XZR)
3192 return TokError("xzr must be followed by xzr");
3193
3194 // We need to push something, since we claim this is an operand in .td.
3195 // See also AArch64AsmParser::parseKeywordOperand.
3196 Operands.push_back(AArch64Operand::CreateReg(
3197 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3198
3199 return ParseStatus::Success;
3200}
3201
3202/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3203ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3204 SMLoc S = getLoc();
3205 const AsmToken &Tok = getTok();
3206 if (Tok.isNot(AsmToken::Identifier))
3207 return TokError("invalid operand for instruction");
3208
3209 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3210 if (!BTI)
3211 return TokError("invalid operand for instruction");
3212
3213 Operands.push_back(AArch64Operand::CreateBTIHint(
3214 BTI->Encoding, Tok.getString(), S, getContext()));
3215 Lex(); // Eat identifier token.
3216 return ParseStatus::Success;
3217}
3218
3219/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3220/// instruction.
3221ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3222 SMLoc S = getLoc();
3223 const MCExpr *Expr = nullptr;
3224
3225 if (getTok().is(AsmToken::Hash)) {
3226 Lex(); // Eat hash token.
3227 }
3228
3229 if (parseSymbolicImmVal(Expr))
3230 return ParseStatus::Failure;
3231
3232 AArch64MCExpr::VariantKind ELFRefKind;
3233 MCSymbolRefExpr::VariantKind DarwinRefKind;
3234 int64_t Addend;
3235 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3236 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3237 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3238 // No modifier was specified at all; this is the syntax for an ELF basic
3239 // ADRP relocation (unfortunately).
3240 Expr =
3242 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3243 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3244 Addend != 0) {
3245 return Error(S, "gotpage label reference not allowed an addend");
3246 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3247 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3248 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3249 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3250 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3251 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3252 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3253 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3254 // The operand must be an @page or @gotpage qualified symbolref.
3255 return Error(S, "page or gotpage label reference expected");
3256 }
3257 }
3258
3259 // We have either a label reference possibly with addend or an immediate. The
3260 // addend is a raw value here. The linker will adjust it to only reference the
3261 // page.
3262 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3263 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3264
3265 return ParseStatus::Success;
3266}
3267
3268/// tryParseAdrLabel - Parse and validate a source label for the ADR
3269/// instruction.
3270ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3271 SMLoc S = getLoc();
3272 const MCExpr *Expr = nullptr;
3273
3274 // Leave anything with a bracket to the default for SVE
3275 if (getTok().is(AsmToken::LBrac))
3276 return ParseStatus::NoMatch;
3277
3278 if (getTok().is(AsmToken::Hash))
3279 Lex(); // Eat hash token.
3280
3281 if (parseSymbolicImmVal(Expr))
3282 return ParseStatus::Failure;
3283
3284 AArch64MCExpr::VariantKind ELFRefKind;
3285 MCSymbolRefExpr::VariantKind DarwinRefKind;
3286 int64_t Addend;
3287 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3288 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3289 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3290 // No modifier was specified at all; this is the syntax for an ELF basic
3291 // ADR relocation (unfortunately).
3292 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3293 } else {
3294 return Error(S, "unexpected adr label");
3295 }
3296 }
3297
3298 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3299 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3300 return ParseStatus::Success;
3301}
3302
3303/// tryParseFPImm - A floating point immediate expression operand.
3304template <bool AddFPZeroAsLiteral>
3305ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3306 SMLoc S = getLoc();
3307
3308 bool Hash = parseOptionalToken(AsmToken::Hash);
3309
3310 // Handle negation, as that still comes through as a separate token.
3311 bool isNegative = parseOptionalToken(AsmToken::Minus);
3312
3313 const AsmToken &Tok = getTok();
3314 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3315 if (!Hash)
3316 return ParseStatus::NoMatch;
3317 return TokError("invalid floating point immediate");
3318 }
3319
3320 // Parse hexadecimal representation.
3321 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3322 if (Tok.getIntVal() > 255 || isNegative)
3323 return TokError("encoded floating point value out of range");
3324
3326 Operands.push_back(
3327 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3328 } else {
3329 // Parse FP representation.
3330 APFloat RealVal(APFloat::IEEEdouble());
3331 auto StatusOrErr =
3332 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3333 if (errorToBool(StatusOrErr.takeError()))
3334 return TokError("invalid floating point representation");
3335
3336 if (isNegative)
3337 RealVal.changeSign();
3338
3339 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3340 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3341 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3342 } else
3343 Operands.push_back(AArch64Operand::CreateFPImm(
3344 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3345 }
3346
3347 Lex(); // Eat the token.
3348
3349 return ParseStatus::Success;
3350}
3351
3352/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3353/// a shift suffix, for example '#1, lsl #12'.
3355AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3356 SMLoc S = getLoc();
3357
3358 if (getTok().is(AsmToken::Hash))
3359 Lex(); // Eat '#'
3360 else if (getTok().isNot(AsmToken::Integer))
3361 // Operand should start from # or should be integer, emit error otherwise.
3362 return ParseStatus::NoMatch;
3363
3364 if (getTok().is(AsmToken::Integer) &&
3365 getLexer().peekTok().is(AsmToken::Colon))
3366 return tryParseImmRange(Operands);
3367
3368 const MCExpr *Imm = nullptr;
3369 if (parseSymbolicImmVal(Imm))
3370 return ParseStatus::Failure;
3371 else if (getTok().isNot(AsmToken::Comma)) {
3372 Operands.push_back(
3373 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3374 return ParseStatus::Success;
3375 }
3376
3377 // Eat ','
3378 Lex();
3379 StringRef VecGroup;
3380 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3381 Operands.push_back(
3382 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3383 Operands.push_back(
3384 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3385 return ParseStatus::Success;
3386 }
3387
3388 // The optional operand must be "lsl #N" where N is non-negative.
3389 if (!getTok().is(AsmToken::Identifier) ||
3390 !getTok().getIdentifier().equals_insensitive("lsl"))
3391 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3392
3393 // Eat 'lsl'
3394 Lex();
3395
3396 parseOptionalToken(AsmToken::Hash);
3397
3398 if (getTok().isNot(AsmToken::Integer))
3399 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3400
3401 int64_t ShiftAmount = getTok().getIntVal();
3402
3403 if (ShiftAmount < 0)
3404 return Error(getLoc(), "positive shift amount required");
3405 Lex(); // Eat the number
3406
3407 // Just in case the optional lsl #0 is used for immediates other than zero.
3408 if (ShiftAmount == 0 && Imm != nullptr) {
3409 Operands.push_back(
3410 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3411 return ParseStatus::Success;
3412 }
3413
3414 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3415 getLoc(), getContext()));
3416 return ParseStatus::Success;
3417}
3418
3419/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3420/// suggestion to help common typos.
3422AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3424 .Case("eq", AArch64CC::EQ)
3425 .Case("ne", AArch64CC::NE)
3426 .Case("cs", AArch64CC::HS)
3427 .Case("hs", AArch64CC::HS)
3428 .Case("cc", AArch64CC::LO)
3429 .Case("lo", AArch64CC::LO)
3430 .Case("mi", AArch64CC::MI)
3431 .Case("pl", AArch64CC::PL)
3432 .Case("vs", AArch64CC::VS)
3433 .Case("vc", AArch64CC::VC)
3434 .Case("hi", AArch64CC::HI)
3435 .Case("ls", AArch64CC::LS)
3436 .Case("ge", AArch64CC::GE)
3437 .Case("lt", AArch64CC::LT)
3438 .Case("gt", AArch64CC::GT)
3439 .Case("le", AArch64CC::LE)
3440 .Case("al", AArch64CC::AL)
3441 .Case("nv", AArch64CC::NV)
3443
3444 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3446 .Case("none", AArch64CC::EQ)
3447 .Case("any", AArch64CC::NE)
3448 .Case("nlast", AArch64CC::HS)
3449 .Case("last", AArch64CC::LO)
3450 .Case("first", AArch64CC::MI)
3451 .Case("nfrst", AArch64CC::PL)
3452 .Case("pmore", AArch64CC::HI)
3453 .Case("plast", AArch64CC::LS)
3454 .Case("tcont", AArch64CC::GE)
3455 .Case("tstop", AArch64CC::LT)
3457
3458 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3459 Suggestion = "nfrst";
3460 }
3461 return CC;
3462}
3463
3464/// parseCondCode - Parse a Condition Code operand.
3465bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3466 bool invertCondCode) {
3467 SMLoc S = getLoc();
3468 const AsmToken &Tok = getTok();
3469 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3470
3471 StringRef Cond = Tok.getString();
3472 std::string Suggestion;
3473 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3474 if (CC == AArch64CC::Invalid) {
3475 std::string Msg = "invalid condition code";
3476 if (!Suggestion.empty())
3477 Msg += ", did you mean " + Suggestion + "?";
3478 return TokError(Msg);
3479 }
3480 Lex(); // Eat identifier token.
3481
3482 if (invertCondCode) {
3483 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3484 return TokError("condition codes AL and NV are invalid for this instruction");
3486 }
3487
3488 Operands.push_back(
3489 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3490 return false;
3491}
3492
3493ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3494 const AsmToken &Tok = getTok();
3495 SMLoc S = getLoc();
3496
3497 if (Tok.isNot(AsmToken::Identifier))
3498 return TokError("invalid operand for instruction");
3499
3500 unsigned PStateImm = -1;
3501 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3502 if (!SVCR)
3503 return ParseStatus::NoMatch;
3504 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3505 PStateImm = SVCR->Encoding;
3506
3507 Operands.push_back(
3508 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3509 Lex(); // Eat identifier token.
3510 return ParseStatus::Success;
3511}
3512
/// Try to parse an SME matrix operand: either the whole "za" array (with an
/// optional element-width suffix) or a named matrix register such as a tile,
/// row or column. On success the matrix operand — and, when immediately
/// followed by '[', the subsequent index operand — is appended to Operands.
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      // "za.<suffix>": translate the suffix into an element width.
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  // Split "head.tail": head names the register, tail is the width suffix.
  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // The last character before the '.' selects row ('h') or column ('v')
  // access; anything else means a whole tile.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3580
3581/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3582/// them if present.
3584AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3585 const AsmToken &Tok = getTok();
3586 std::string LowerID = Tok.getString().lower();
3589 .Case("lsl", AArch64_AM::LSL)
3590 .Case("lsr", AArch64_AM::LSR)
3591 .Case("asr", AArch64_AM::ASR)
3592 .Case("ror", AArch64_AM::ROR)
3593 .Case("msl", AArch64_AM::MSL)
3594 .Case("uxtb", AArch64_AM::UXTB)
3595 .Case("uxth", AArch64_AM::UXTH)
3596 .Case("uxtw", AArch64_AM::UXTW)
3597 .Case("uxtx", AArch64_AM::UXTX)
3598 .Case("sxtb", AArch64_AM::SXTB)
3599 .Case("sxth", AArch64_AM::SXTH)
3600 .Case("sxtw", AArch64_AM::SXTW)
3601 .Case("sxtx", AArch64_AM::SXTX)
3603
3605 return ParseStatus::NoMatch;
3606
3607 SMLoc S = Tok.getLoc();
3608 Lex();
3609
3610 bool Hash = parseOptionalToken(AsmToken::Hash);
3611
3612 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3613 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3614 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3615 ShOp == AArch64_AM::MSL) {
3616 // We expect a number here.
3617 return TokError("expected #imm after shift specifier");
3618 }
3619
3620 // "extend" type operations don't need an immediate, #0 is implicit.
3621 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3622 Operands.push_back(
3623 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3624 return ParseStatus::Success;
3625 }
3626
3627 // Make sure we do actually have a number, identifier or a parenthesized
3628 // expression.
3629 SMLoc E = getLoc();
3630 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3631 !getTok().is(AsmToken::Identifier))
3632 return Error(E, "expected integer shift amount");
3633
3634 const MCExpr *ImmVal;
3635 if (getParser().parseExpression(ImmVal))
3636 return ParseStatus::Failure;
3637
3638 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3639 if (!MCE)
3640 return Error(E, "expected constant '#imm' after shift specifier");
3641
3642 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3643 Operands.push_back(AArch64Operand::CreateShiftExtend(
3644 ShOp, MCE->getValue(), true, S, E, getContext()));
3645 return ParseStatus::Success;
3646}
3647
3648static const struct Extension {
3649 const char *Name;
3651} ExtensionMap[] = {
3652 {"crc", {AArch64::FeatureCRC}},
3653 {"sm4", {AArch64::FeatureSM4}},
3654 {"sha3", {AArch64::FeatureSHA3}},
3655 {"sha2", {AArch64::FeatureSHA2}},
3656 {"aes", {AArch64::FeatureAES}},
3657 {"crypto", {AArch64::FeatureCrypto}},
3658 {"fp", {AArch64::FeatureFPARMv8}},
3659 {"simd", {AArch64::FeatureNEON}},
3660 {"ras", {AArch64::FeatureRAS}},
3661 {"rasv2", {AArch64::FeatureRASv2}},
3662 {"lse", {AArch64::FeatureLSE}},
3663 {"predres", {AArch64::FeaturePredRes}},
3664 {"predres2", {AArch64::FeatureSPECRES2}},
3665 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3666 {"mte", {AArch64::FeatureMTE}},
3667 {"memtag", {AArch64::FeatureMTE}},
3668 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3669 {"pan", {AArch64::FeaturePAN}},
3670 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3671 {"ccpp", {AArch64::FeatureCCPP}},
3672 {"rcpc", {AArch64::FeatureRCPC}},
3673 {"rng", {AArch64::FeatureRandGen}},
3674 {"sve", {AArch64::FeatureSVE}},
3675 {"sve2", {AArch64::FeatureSVE2}},
3676 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3677 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3678 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3679 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3680 {"sve2p1", {AArch64::FeatureSVE2p1}},
3681 {"b16b16", {AArch64::FeatureB16B16}},
3682 {"ls64", {AArch64::FeatureLS64}},
3683 {"xs", {AArch64::FeatureXS}},
3684 {"pauth", {AArch64::FeaturePAuth}},
3685 {"flagm", {AArch64::FeatureFlagM}},
3686 {"rme", {AArch64::FeatureRME}},
3687 {"sme", {AArch64::FeatureSME}},
3688 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3689 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3690 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3691 {"sme2", {AArch64::FeatureSME2}},
3692 {"sme2p1", {AArch64::FeatureSME2p1}},
3693 {"hbc", {AArch64::FeatureHBC}},
3694 {"mops", {AArch64::FeatureMOPS}},
3695 {"mec", {AArch64::FeatureMEC}},
3696 {"the", {AArch64::FeatureTHE}},
3697 {"d128", {AArch64::FeatureD128}},
3698 {"lse128", {AArch64::FeatureLSE128}},
3699 {"ite", {AArch64::FeatureITE}},
3700 {"cssc", {AArch64::FeatureCSSC}},
3701 {"rcpc3", {AArch64::FeatureRCPC3}},
3702 {"gcs", {AArch64::FeatureGCS}},
3703 {"bf16", {AArch64::FeatureBF16}},
3704 {"compnum", {AArch64::FeatureComplxNum}},
3705 {"dotprod", {AArch64::FeatureDotProd}},
3706 {"f32mm", {AArch64::FeatureMatMulFP32}},
3707 {"f64mm", {AArch64::FeatureMatMulFP64}},
3708 {"fp16", {AArch64::FeatureFullFP16}},
3709 {"fp16fml", {AArch64::FeatureFP16FML}},
3710 {"i8mm", {AArch64::FeatureMatMulInt8}},
3711 {"lor", {AArch64::FeatureLOR}},
3712 {"profile", {AArch64::FeatureSPE}},
3713 // "rdma" is the name documented by binutils for the feature, but
3714 // binutils also accepts incomplete prefixes of features, so "rdm"
3715 // works too. Support both spellings here.
3716 {"rdm", {AArch64::FeatureRDM}},
3717 {"rdma", {AArch64::FeatureRDM}},
3718 {"sb", {AArch64::FeatureSB}},
3719 {"ssbs", {AArch64::FeatureSSBS}},
3720 {"tme", {AArch64::FeatureTME}},
3721 {"fpmr", {AArch64::FeatureFPMR}},
3722 {"fp8", {AArch64::FeatureFP8}},
3723 {"faminmax", {AArch64::FeatureFAMINMAX}},
3724 {"fp8fma", {AArch64::FeatureFP8FMA}},
3725 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3726 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3727 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3728 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3729 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3730 {"lut", {AArch64::FeatureLUT}},
3731 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3732 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3733 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3734 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3735 {"cpa", {AArch64::FeatureCPA}},
3736 {"tlbiw", {AArch64::FeatureTLBIW}},
3738
3739static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3740 if (FBS[AArch64::HasV8_0aOps])
3741 Str += "ARMv8a";
3742 if (FBS[AArch64::HasV8_1aOps])
3743 Str += "ARMv8.1a";
3744 else if (FBS[AArch64::HasV8_2aOps])
3745 Str += "ARMv8.2a";
3746 else if (FBS[AArch64::HasV8_3aOps])
3747 Str += "ARMv8.3a";
3748 else if (FBS[AArch64::HasV8_4aOps])
3749 Str += "ARMv8.4a";
3750 else if (FBS[AArch64::HasV8_5aOps])
3751 Str += "ARMv8.5a";
3752 else if (FBS[AArch64::HasV8_6aOps])
3753 Str += "ARMv8.6a";
3754 else if (FBS[AArch64::HasV8_7aOps])
3755 Str += "ARMv8.7a";
3756 else if (FBS[AArch64::HasV8_8aOps])
3757 Str += "ARMv8.8a";
3758 else if (FBS[AArch64::HasV8_9aOps])
3759 Str += "ARMv8.9a";
3760 else if (FBS[AArch64::HasV9_0aOps])
3761 Str += "ARMv9-a";
3762 else if (FBS[AArch64::HasV9_1aOps])
3763 Str += "ARMv9.1a";
3764 else if (FBS[AArch64::HasV9_2aOps])
3765 Str += "ARMv9.2a";
3766 else if (FBS[AArch64::HasV9_3aOps])
3767 Str += "ARMv9.3a";
3768 else if (FBS[AArch64::HasV9_4aOps])
3769 Str += "ARMv9.4a";
3770 else if (FBS[AArch64::HasV9_5aOps])
3771 Str += "ARMv9.5a";
3772 else if (FBS[AArch64::HasV8_0rOps])
3773 Str += "ARMv8r";
3774 else {
3775 SmallVector<std::string, 2> ExtMatches;
3776 for (const auto& Ext : ExtensionMap) {
3777 // Use & in case multiple features are enabled
3778 if ((FBS & Ext.Features) != FeatureBitset())
3779 ExtMatches.push_back(Ext.Name);
3780 }
3781 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3782 }
3783}
3784
3785void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3786 SMLoc S) {
3787 const uint16_t Op2 = Encoding & 7;
3788 const uint16_t Cm = (Encoding & 0x78) >> 3;
3789 const uint16_t Cn = (Encoding & 0x780) >> 7;
3790 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3791
3792 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3793
3794 Operands.push_back(
3795 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3796 Operands.push_back(
3797 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3798 Operands.push_back(
3799 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3800 Expr = MCConstantExpr::create(Op2, getContext());
3801 Operands.push_back(
3802 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3803}
3804
3805/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3806/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3807bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3809 if (Name.contains('.'))
3810 return TokError("invalid operand");
3811
3812 Mnemonic = Name;
3813 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3814
3815 const AsmToken &Tok = getTok();
3816 StringRef Op = Tok.getString();
3817 SMLoc S = Tok.getLoc();
3818
3819 if (Mnemonic == "ic") {
3820 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3821 if (!IC)
3822 return TokError("invalid operand for IC instruction");
3823 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3824 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3826 return TokError(Str);
3827 }
3828 createSysAlias(IC->Encoding, Operands, S);
3829 } else if (Mnemonic == "dc") {
3830 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3831 if (!DC)
3832 return TokError("invalid operand for DC instruction");
3833 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3834 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3836 return TokError(Str);
3837 }
3838 createSysAlias(DC->Encoding, Operands, S);
3839 } else if (Mnemonic == "at") {
3840 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3841 if (!AT)
3842 return TokError("invalid operand for AT instruction");
3843 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3844 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3846 return TokError(Str);
3847 }
3848 createSysAlias(AT->Encoding, Operands, S);
3849 } else if (Mnemonic == "tlbi") {
3850 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3851 if (!TLBI)
3852 return TokError("invalid operand for TLBI instruction");
3853 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3854 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3856 return TokError(Str);
3857 }
3858 createSysAlias(TLBI->Encoding, Operands, S);
3859 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3860
3861 if (Op.lower() != "rctx")
3862 return TokError("invalid operand for prediction restriction instruction");
3863
3864 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3865 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3866 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3867
3868 if (Mnemonic == "cosp" && !hasSpecres2)
3869 return TokError("COSP requires: predres2");
3870 if (!hasPredres)
3871 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3872
3873 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3874 : Mnemonic == "dvp" ? 0b101
3875 : Mnemonic == "cosp" ? 0b110
3876 : Mnemonic == "cpp" ? 0b111
3877 : 0;
3878 assert(PRCTX_Op2 &&
3879 "Invalid mnemonic for prediction restriction instruction");
3880 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3881 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3882
3883 createSysAlias(Encoding, Operands, S);
3884 }
3885
3886 Lex(); // Eat operand.
3887
3888 bool ExpectRegister = !Op.contains_insensitive("all");
3889 bool HasRegister = false;
3890
3891 // Check for the optional register operand.
3892 if (parseOptionalToken(AsmToken::Comma)) {
3893 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3894 return TokError("expected register operand");
3895 HasRegister = true;
3896 }
3897
3898 if (ExpectRegister && !HasRegister)
3899 return TokError("specified " + Mnemonic + " op requires a register");
3900 else if (!ExpectRegister && HasRegister)
3901 return TokError("specified " + Mnemonic + " op does not use a register");
3902
3903 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3904 return true;
3905
3906 return false;
3907}
3908
3909/// parseSyspAlias - The TLBIP instructions are simple aliases for
3910/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3911bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3913 if (Name.contains('.'))
3914 return TokError("invalid operand");
3915
3916 Mnemonic = Name;
3917 Operands.push_back(
3918 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3919
3920 const AsmToken &Tok = getTok();
3921 StringRef Op = Tok.getString();
3922 SMLoc S = Tok.getLoc();
3923
3924 if (Mnemonic == "tlbip") {
3925 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3926 if (HasnXSQualifier) {
3927 Op = Op.drop_back(3);
3928 }
3929 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3930 if (!TLBIorig)
3931 return TokError("invalid operand for TLBIP instruction");
3932 const AArch64TLBI::TLBI TLBI(
3933 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3934 TLBIorig->NeedsReg,
3935 HasnXSQualifier
3936 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3937 : TLBIorig->FeaturesRequired);
3938 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3939 std::string Name =
3940 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3941 std::string Str("TLBIP " + Name + " requires: ");
3943 return TokError(Str);
3944 }
3945 createSysAlias(TLBI.Encoding, Operands, S);
3946 }
3947
3948 Lex(); // Eat operand.
3949
3950 if (parseComma())
3951 return true;
3952
3953 if (Tok.isNot(AsmToken::Identifier))
3954 return TokError("expected register identifier");
3955 auto Result = tryParseSyspXzrPair(Operands);
3956 if (Result.isNoMatch())
3957 Result = tryParseGPRSeqPair(Operands);
3958 if (!Result.isSuccess())
3959 return TokError("specified " + Mnemonic +
3960 " op requires a pair of registers");
3961
3962 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3963 return true;
3964
3965 return false;
3966}
3967
3968ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3969 MCAsmParser &Parser = getParser();
3970 const AsmToken &Tok = getTok();
3971
3972 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
3973 return TokError("'csync' operand expected");
3974 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3975 // Immediate operand.
3976 const MCExpr *ImmVal;
3977 SMLoc ExprLoc = getLoc();
3978 AsmToken IntTok = Tok;
3979 if (getParser().parseExpression(ImmVal))
3980 return ParseStatus::Failure;
3981 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3982 if (!MCE)
3983 return Error(ExprLoc, "immediate value expected for barrier operand");
3984 int64_t Value = MCE->getValue();
3985 if (Mnemonic == "dsb" && Value > 15) {
3986 // This case is a no match here, but it might be matched by the nXS
3987 // variant. Deliberately not unlex the optional '#' as it is not necessary
3988 // to characterize an integer immediate.
3989 Parser.getLexer().UnLex(IntTok);
3990 return ParseStatus::NoMatch;
3991 }
3992 if (Value < 0 || Value > 15)
3993 return Error(ExprLoc, "barrier operand out of range");
3994 auto DB = AArch64DB::lookupDBByEncoding(Value);
3995 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3996 ExprLoc, getContext(),
3997 false /*hasnXSModifier*/));
3998 return ParseStatus::Success;
3999 }
4000
4001 if (Tok.isNot(AsmToken::Identifier))
4002 return TokError("invalid operand for instruction");
4003
4004 StringRef Operand = Tok.getString();
4005 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4006 auto DB = AArch64DB::lookupDBByName(Operand);
4007 // The only valid named option for ISB is 'sy'
4008 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4009 return TokError("'sy' or #imm operand expected");
4010 // The only valid named option for TSB is 'csync'
4011 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4012 return TokError("'csync' operand expected");
4013 if (!DB && !TSB) {
4014 if (Mnemonic == "dsb") {
4015 // This case is a no match here, but it might be matched by the nXS
4016 // variant.
4017 return ParseStatus::NoMatch;
4018 }
4019 return TokError("invalid barrier option name");
4020 }
4021
4022 Operands.push_back(AArch64Operand::CreateBarrier(
4023 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4024 getContext(), false /*hasnXSModifier*/));
4025 Lex(); // Consume the option
4026
4027 return ParseStatus::Success;
4028}
4029
4031AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4032 const AsmToken &Tok = getTok();
4033
4034 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4035 if (Mnemonic != "dsb")
4036 return ParseStatus::Failure;
4037
4038 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4039 // Immediate operand.
4040 const MCExpr *ImmVal;
4041 SMLoc ExprLoc = getLoc();
4042 if (getParser().parseExpression(ImmVal))
4043 return ParseStatus::Failure;
4044 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4045 if (!MCE)
4046 return Error(ExprLoc, "immediate value expected for barrier operand");
4047 int64_t Value = MCE->getValue();
4048 // v8.7-A DSB in the nXS variant accepts only the following immediate
4049 // values: 16, 20, 24, 28.
4050 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4051 return Error(ExprLoc, "barrier operand out of range");
4052 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4053 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4054 ExprLoc, getContext(),
4055 true /*hasnXSModifier*/));
4056 return ParseStatus::Success;
4057 }
4058
4059 if (Tok.isNot(AsmToken::Identifier))
4060 return TokError("invalid operand for instruction");
4061
4062 StringRef Operand = Tok.getString();
4063 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4064
4065 if (!DB)
4066 return TokError("invalid barrier option name");
4067
4068 Operands.push_back(
4069 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4070 getContext(), true /*hasnXSModifier*/));
4071 Lex(); // Consume the option
4072
4073 return ParseStatus::Success;
4074}
4075
4076ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4077 const AsmToken &Tok = getTok();
4078
4079 if (Tok.isNot(AsmToken::Identifier))
4080 return ParseStatus::NoMatch;
4081
4082 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4083 return ParseStatus::NoMatch;
4084
4085 int MRSReg, MSRReg;
4086 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4087 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4088 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4089 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4090 } else
4091 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4092
4093 unsigned PStateImm = -1;
4094 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4095 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4096 PStateImm = PState15->Encoding;
4097 if (!PState15) {
4098 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4099 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4100 PStateImm = PState1->Encoding;
4101 }
4102
4103 Operands.push_back(
4104 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4105 PStateImm, getContext()));
4106 Lex(); // Eat identifier
4107
4108 return ParseStatus::Success;
4109}
4110
4111/// tryParseNeonVectorRegister - Parse a vector register operand.
4112bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4113 if (getTok().isNot(AsmToken::Identifier))
4114 return true;
4115
4116 SMLoc S = getLoc();
4117 // Check for a vector register specifier first.
4120 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4121 if (!Res.isSuccess())
4122 return true;
4123
4124 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4125 if (!KindRes)
4126 return true;
4127
4128 unsigned ElementWidth = KindRes->second;
4129 Operands.push_back(
4130 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4131 S, getLoc(), getContext()));
4132
4133 // If there was an explicit qualifier, that goes on as a literal text
4134 // operand.
4135 if (!Kind.empty())
4136 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4137
4138 return tryParseVectorIndex(Operands).isFailure();
4139}
4140
4141ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4142 SMLoc SIdx = getLoc();
4143 if (parseOptionalToken(AsmToken::LBrac)) {
4144 const MCExpr *ImmVal;
4145 if (getParser().parseExpression(ImmVal))
4146 return ParseStatus::NoMatch;
4147 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4148 if (!MCE)
4149 return TokError("immediate value expected for vector index");
4150
4151 SMLoc E = getLoc();
4152
4153 if (parseToken(AsmToken::RBrac, "']' expected"))
4154 return ParseStatus::Failure;
4155
4156 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4157 E, getContext()));
4158 return ParseStatus::Success;
4159 }
4160
4161 return ParseStatus::NoMatch;
4162}
4163
4164// tryParseVectorRegister - Try to parse a vector register name with
4165// optional kind specifier. If it is a register specifier, eat the token
4166// and return it.
4167ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4168 StringRef &Kind,
4169 RegKind MatchKind) {
4170 const AsmToken &Tok = getTok();
4171
4172 if (Tok.isNot(AsmToken::Identifier))
4173 return ParseStatus::NoMatch;
4174
4175 StringRef Name = Tok.getString();
4176 // If there is a kind specifier, it's separated from the register name by
4177 // a '.'.
4178 size_t Start = 0, Next = Name.find('.');
4179 StringRef Head = Name.slice(Start, Next);
4180 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4181
4182 if (RegNum) {
4183 if (Next != StringRef::npos) {
4184 Kind = Name.slice(Next, StringRef::npos);
4185 if (!isValidVectorKind(Kind, MatchKind))
4186 return TokError("invalid vector kind qualifier");
4187 }
4188 Lex(); // Eat the register token.
4189
4190 Reg = RegNum;
4191 return ParseStatus::Success;
4192 }
4193
4194 return ParseStatus::NoMatch;
4195}
4196
4197ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4200 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4201 if (!Status.isSuccess())
4202 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4203 return Status;
4204}
4205
4206/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4207template <RegKind RK>
4209AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4210 // Check for a SVE predicate register specifier first.
4211 const SMLoc S = getLoc();
4213 MCRegister RegNum;
4214 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4215 if (!Res.isSuccess())
4216 return Res;
4217
4218 const auto &KindRes = parseVectorKind(Kind, RK);
4219 if (!KindRes)
4220 return ParseStatus::NoMatch;
4221
4222 unsigned ElementWidth = KindRes->second;
4223 Operands.push_back(AArch64Operand::CreateVectorReg(
4224 RegNum, RK, ElementWidth, S,
4225 getLoc(), getContext()));
4226
4227 if (getLexer().is(AsmToken::LBrac)) {
4228 if (RK == RegKind::SVEPredicateAsCounter) {
4229 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4230 if (ResIndex.isSuccess())
4231 return ParseStatus::Success;
4232 } else {
4233 // Indexed predicate, there's no comma so try parse the next operand
4234 // immediately.
4235 if (parseOperand(Operands, false, false))
4236 return ParseStatus::NoMatch;
4237 }
4238 }
4239
4240 // Not all predicates are followed by a '/m' or '/z'.
4241 if (getTok().isNot(AsmToken::Slash))
4242 return ParseStatus::Success;
4243
4244 // But when they do they shouldn't have an element type suffix.
4245 if (!Kind.empty())
4246 return Error(S, "not expecting size suffix");
4247
4248 // Add a literal slash as operand
4249 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4250
4251 Lex(); // Eat the slash.
4252
4253 // Zeroing or merging?
4254 auto Pred = getTok().getString().lower();
4255 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4256 return Error(getLoc(), "expecting 'z' predication");
4257
4258 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4259 return Error(getLoc(), "expecting 'm' or 'z' predication");
4260
4261 // Add zero/merge token.
4262 const char *ZM = Pred == "z" ? "z" : "m";
4263 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4264
4265 Lex(); // Eat zero/merge token.
4266 return ParseStatus::Success;
4267}
4268
4269/// parseRegister - Parse a register operand.
4270bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4271 // Try for a Neon vector register.
4272 if (!tryParseNeonVectorRegister(Operands))
4273 return false;
4274
4275 if (tryParseZTOperand(Operands).isSuccess())
4276 return false;
4277
4278 // Otherwise try for a scalar register.
4279 if (tryParseGPROperand<false>(Operands).isSuccess())
4280 return false;
4281
4282 return true;
4283}
4284
4285bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4286 bool HasELFModifier = false;
4288
4289 if (parseOptionalToken(AsmToken::Colon)) {
4290 HasELFModifier = true;
4291
4292 if (getTok().isNot(AsmToken::Identifier))
4293 return TokError("expect relocation specifier in operand after ':'");
4294
4295 std::string LowerCase = getTok().getIdentifier().lower();
4296 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4298 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4299 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4300 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4301 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4302 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4303 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4304 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4305 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4306 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4307 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4308 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4309 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4310 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4311 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4312 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4313 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4314 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4315 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4316 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4317 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4318 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4319 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4320 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4321 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4322 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4323 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4324 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4325 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4326 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4327 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4328 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4329 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4330 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4331 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4332 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4334 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4335 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4337 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4338 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4339 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4341 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4342 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4344
4345 if (RefKind == AArch64MCExpr::VK_INVALID)
4346 return TokError("expect relocation specifier in operand after ':'");
4347
4348 Lex(); // Eat identifier
4349
4350 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4351 return true;
4352 }
4353
4354 if (getParser().parseExpression(ImmVal))
4355 return true;
4356
4357 if (HasELFModifier)
4358 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4359
4360 return false;
4361}
4362
4363ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4364 if (getTok().isNot(AsmToken::LCurly))
4365 return ParseStatus::NoMatch;
4366
4367 auto ParseMatrixTile = [this](unsigned &Reg,
4368 unsigned &ElementWidth) -> ParseStatus {
4369 StringRef Name = getTok().getString();
4370 size_t DotPosition = Name.find('.');
4371 if (DotPosition == StringRef::npos)
4372 return ParseStatus::NoMatch;
4373
4374 unsigned RegNum = matchMatrixTileListRegName(Name);
4375 if (!RegNum)
4376 return ParseStatus::NoMatch;
4377
4378 StringRef Tail = Name.drop_front(DotPosition);
4379 const std::optional<std::pair<int, int>> &KindRes =
4380 parseVectorKind(Tail, RegKind::Matrix);
4381 if (!KindRes)
4382 return TokError(
4383 "Expected the register to be followed by element width suffix");
4384 ElementWidth = KindRes->second;
4385 Reg = RegNum;
4386 Lex(); // Eat the register.
4387 return ParseStatus::Success;
4388 };
4389
4390 SMLoc S = getLoc();
4391 auto LCurly = getTok();
4392 Lex(); // Eat left bracket token.
4393
4394 // Empty matrix list
4395 if (parseOptionalToken(AsmToken::RCurly)) {
4396 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4397 /*RegMask=*/0, S, getLoc(), getContext()));
4398 return ParseStatus::Success;
4399 }
4400
4401 // Try parse {za} alias early
4402 if (getTok().getString().equals_insensitive("za")) {
4403 Lex(); // Eat 'za'
4404
4405 if (parseToken(AsmToken::RCurly, "'}' expected"))
4406 return ParseStatus::Failure;
4407
4408 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4409 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4410 return ParseStatus::Success;
4411 }
4412
4413 SMLoc TileLoc = getLoc();
4414
4415 unsigned FirstReg, ElementWidth;
4416 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4417 if (!ParseRes.isSuccess()) {
4418 getLexer().UnLex(LCurly);
4419 return ParseRes;
4420 }
4421
4422 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4423
4424 unsigned PrevReg = FirstReg;
4425
4427 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4428
4429 SmallSet<unsigned, 8> SeenRegs;
4430 SeenRegs.insert(FirstReg);
4431
4432 while (parseOptionalToken(AsmToken::Comma)) {
4433 TileLoc = getLoc();
4434 unsigned Reg, NextElementWidth;
4435 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4436 if (!ParseRes.isSuccess())
4437 return ParseRes;
4438
4439 // Element size must match on all regs in the list.
4440 if (ElementWidth != NextElementWidth)
4441 return Error(TileLoc, "mismatched register size suffix");
4442
4443 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4444 Warning(TileLoc, "tile list not in ascending order");
4445
4446 if (SeenRegs.contains(Reg))
4447 Warning(TileLoc, "duplicate tile in list");
4448 else {
4449 SeenRegs.insert(Reg);
4450 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4451 }
4452
4453 PrevReg = Reg;
4454 }
4455
4456 if (parseToken(AsmToken::RCurly, "'}' expected"))
4457 return ParseStatus::Failure;
4458
4459 unsigned RegMask = 0;
4460 for (auto Reg : DRegs)
4461 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4462 RI->getEncodingValue(AArch64::ZAD0));
4463 Operands.push_back(
4464 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4465
4466 return ParseStatus::Success;
4467}
4468
4469template <RegKind VectorKind>
4470ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4471 bool ExpectMatch) {
4472 MCAsmParser &Parser = getParser();
4473 if (!getTok().is(AsmToken::LCurly))
4474 return ParseStatus::NoMatch;
4475
4476 // Wrapper around parse function
4477 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4478 bool NoMatchIsError) -> ParseStatus {
4479 auto RegTok = getTok();
4480 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4481 if (ParseRes.isSuccess()) {
4482 if (parseVectorKind(Kind, VectorKind))
4483 return ParseRes;
4484 llvm_unreachable("Expected a valid vector kind");
4485 }
4486
4487 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4488 RegTok.getString().equals_insensitive("zt0"))
4489 return ParseStatus::NoMatch;
4490
4491 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4492 (ParseRes.isNoMatch() && NoMatchIsError &&
4493 !RegTok.getString().starts_with_insensitive("za")))
4494 return Error(Loc, "vector register expected");
4495
4496 return ParseStatus::NoMatch;
4497 };
4498
4499 int NumRegs = getNumRegsForRegKind(VectorKind);
4500 SMLoc S = getLoc();
4501 auto LCurly = getTok();
4502 Lex(); // Eat left bracket token.
4503
4505 MCRegister FirstReg;
4506 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4507
4508 // Put back the original left bracket if there was no match, so that
4509 // different types of list-operands can be matched (e.g. SVE, Neon).
4510 if (ParseRes.isNoMatch())
4511 Parser.getLexer().UnLex(LCurly);
4512
4513 if (!ParseRes.isSuccess())
4514 return ParseRes;
4515
4516 int64_t PrevReg = FirstReg;
4517 unsigned Count = 1;
4518
4519 int Stride = 1;
4520 if (parseOptionalToken(AsmToken::Minus)) {
4521 SMLoc Loc = getLoc();
4522 StringRef NextKind;
4523
4525 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4526 if (!ParseRes.isSuccess())
4527 return ParseRes;
4528
4529 // Any Kind suffices must match on all regs in the list.
4530 if (Kind != NextKind)
4531 return Error(Loc, "mismatched register size suffix");
4532
4533 unsigned Space =
4534 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4535
4536 if (Space == 0 || Space > 3)
4537 return Error(Loc, "invalid number of vectors");
4538
4539 Count += Space;
4540 }
4541 else {
4542 bool HasCalculatedStride = false;
4543 while (parseOptionalToken(AsmToken::Comma)) {
4544 SMLoc Loc = getLoc();
4545 StringRef NextKind;
4547 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4548 if (!ParseRes.isSuccess())
4549 return ParseRes;
4550
4551 // Any Kind suffices must match on all regs in the list.
4552 if (Kind != NextKind)
4553 return Error(Loc, "mismatched register size suffix");
4554
4555 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4556 unsigned PrevRegVal =
4557 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4558 if (!HasCalculatedStride) {
4559 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4560 : (RegVal + NumRegs - PrevRegVal);
4561 HasCalculatedStride = true;
4562 }
4563
4564 // Register must be incremental (with a wraparound at last register).
4565 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4566 return Error(Loc, "registers must have the same sequential stride");
4567
4568 PrevReg = Reg;
4569 ++Count;
4570 }
4571 }
4572
4573 if (parseToken(AsmToken::RCurly, "'}' expected"))
4574 return ParseStatus::Failure;
4575
4576 if (Count > 4)
4577 return Error(S, "invalid number of vectors");
4578
4579 unsigned NumElements = 0;
4580 unsigned ElementWidth = 0;
4581 if (!Kind.empty()) {
4582 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4583 std::tie(NumElements, ElementWidth) = *VK;
4584 }
4585
4586 Operands.push_back(AArch64Operand::CreateVectorList(
4587 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4588 getLoc(), getContext()));
4589
4590 return ParseStatus::Success;
4591}
4592
4593/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4594bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4595 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4596 if (!ParseRes.isSuccess())
4597 return true;
4598
4599 return tryParseVectorIndex(Operands).isFailure();
4600}
4601
4602ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4603 SMLoc StartLoc = getLoc();
4604
4605 MCRegister RegNum;
4606 ParseStatus Res = tryParseScalarRegister(RegNum);
4607 if (!Res.isSuccess())
4608 return Res;
4609
4610 if (!parseOptionalToken(AsmToken::Comma)) {
4611 Operands.push_back(AArch64Operand::CreateReg(
4612 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4613 return ParseStatus::Success;
4614 }
4615
4616 parseOptionalToken(AsmToken::Hash);
4617
4618 if (getTok().isNot(AsmToken::Integer))
4619 return Error(getLoc(), "index must be absent or #0");
4620
4621 const MCExpr *ImmVal;
4622 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4623 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4624 return Error(getLoc(), "index must be absent or #0");
4625
4626 Operands.push_back(AArch64Operand::CreateReg(
4627 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4628 return ParseStatus::Success;
4629}
4630
4631ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4632 SMLoc StartLoc = getLoc();
4633 const AsmToken &Tok = getTok();
4634 std::string Name = Tok.getString().lower();
4635
4636 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4637
4638 if (RegNum == 0)
4639 return ParseStatus::NoMatch;
4640
4641 Operands.push_back(AArch64Operand::CreateReg(
4642 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4643 Lex(); // Eat register.
4644
4645 // Check if register is followed by an index
4646 if (parseOptionalToken(AsmToken::LBrac)) {
4647 Operands.push_back(
4648 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4649 const MCExpr *ImmVal;
4650 if (getParser().parseExpression(ImmVal))
4651 return ParseStatus::NoMatch;
4652 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4653 if (!MCE)
4654 return TokError("immediate value expected for vector index");
4655 Operands.push_back(AArch64Operand::CreateImm(
4656 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4657 getLoc(), getContext()));
4658 if (parseOptionalToken(AsmToken::Comma))
4659 if (parseOptionalMulOperand(Operands))
4660 return ParseStatus::Failure;
4661 if (parseToken(AsmToken::RBrac, "']' expected"))
4662 return ParseStatus::Failure;
4663 Operands.push_back(
4664 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4665 }
4666 return ParseStatus::Success;
4667}
4668
4669template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4670ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4671 SMLoc StartLoc = getLoc();
4672
4673 MCRegister RegNum;
4674 ParseStatus Res = tryParseScalarRegister(RegNum);
4675 if (!Res.isSuccess())
4676 return Res;
4677
4678 // No shift/extend is the default.
4679 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4680 Operands.push_back(AArch64Operand::CreateReg(
4681 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4682 return ParseStatus::Success;
4683 }
4684
4685 // Eat the comma
4686 Lex();
4687
4688 // Match the shift
4690 Res = tryParseOptionalShiftExtend(ExtOpnd);
4691 if (!Res.isSuccess())
4692 return Res;
4693
4694 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4695 Operands.push_back(AArch64Operand::CreateReg(
4696 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4697 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4698 Ext->hasShiftExtendAmount()));
4699
4700 return ParseStatus::Success;
4701}
4702
4703bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4704 MCAsmParser &Parser = getParser();
4705
4706 // Some SVE instructions have a decoration after the immediate, i.e.
4707 // "mul vl". We parse them here and add tokens, which must be present in the
4708 // asm string in the tablegen instruction.
4709 bool NextIsVL =
4710 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4711 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4712 if (!getTok().getString().equals_insensitive("mul") ||
4713 !(NextIsVL || NextIsHash))
4714 return true;
4715
4716 Operands.push_back(
4717 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4718 Lex(); // Eat the "mul"
4719
4720 if (NextIsVL) {
4721 Operands.push_back(
4722 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4723 Lex(); // Eat the "vl"
4724 return false;
4725 }
4726
4727 if (NextIsHash) {
4728 Lex(); // Eat the #
4729 SMLoc S = getLoc();
4730
4731 // Parse immediate operand.
4732 const MCExpr *ImmVal;
4733 if (!Parser.parseExpression(ImmVal))
4734 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4735 Operands.push_back(AArch64Operand::CreateImm(
4736 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4737 getContext()));
4738 return false;
4739 }
4740 }
4741
4742 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4743}
4744
4745bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4746 StringRef &VecGroup) {
4747 MCAsmParser &Parser = getParser();
4748 auto Tok = Parser.getTok();
4749 if (Tok.isNot(AsmToken::Identifier))
4750 return true;
4751
4753 .Case("vgx2", "vgx2")
4754 .Case("vgx4", "vgx4")
4755 .Default("");
4756
4757 if (VG.empty())
4758 return true;
4759
4760 VecGroup = VG;
4761 Parser.Lex(); // Eat vgx[2|4]
4762 return false;
4763}
4764
4765bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4766 auto Tok = getTok();
4767 if (Tok.isNot(AsmToken::Identifier))
4768 return true;
4769
4770 auto Keyword = Tok.getString();
4772 .Case("sm", "sm")
4773 .Case("za", "za")
4774 .Default(Keyword);
4775 Operands.push_back(
4776 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4777
4778 Lex();
4779 return false;
4780}
4781
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param Operands       operand list the parsed operand(s) are appended to.
/// \param isCondCode     true when a condition-code operand is expected at
///                       this position (e.g. 4th operand of ccmp).
/// \param invertCondCode true when the parsed condition code must be
///                       inverted (cset/cinc-style aliases).
/// \returns true on error (a diagnostic has been emitted), false otherwise.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Shared helper: after a comma, try to parse a shift/extend modifier; if
  // none matches, push the saved token back so it can be re-parsed as the
  // start of the next operand.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else: treat as a symbolic immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        getLexer().UnLex(SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, which is what the matcher
      // expects for these comparisons.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Strip trailing 16-bit-aligned zero chunks so the value fits a
      // movz with an LSL #16/#32/#48 shift.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // NOTE(review): S is not assigned on this path and is used as the
        // start loc of the synthesized immediate — confirm intended.
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4994
4995bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4996 const MCExpr *Expr = nullptr;
4997 SMLoc L = getLoc();
4998 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4999 return true;
5000 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5001 if (check(!Value, L, "expected constant expression"))
5002 return true;
5003 Out = Value->getValue();
5004 return false;
5005}
5006
5007bool AArch64AsmParser::parseComma() {
5008 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5009 return true;
5010 // Eat the comma
5011 Lex();
5012 return false;
5013}
5014
5015bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5016 unsigned First, unsigned Last) {
5018 SMLoc Start, End;
5019 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5020 return true;
5021
5022 // Special handling for FP and LR; they aren't linearly after x28 in
5023 // the registers enum.
5024 unsigned RangeEnd = Last;
5025 if (Base == AArch64::X0) {
5026 if (Last == AArch64::FP) {
5027 RangeEnd = AArch64::X28;
5028 if (Reg == AArch64::FP) {
5029 Out = 29;
5030 return false;
5031 }
5032 }
5033 if (Last == AArch64::LR) {
5034 RangeEnd = AArch64::X28;
5035 if (Reg == AArch64::FP) {
5036 Out = 29;
5037 return false;
5038 } else if (Reg == AArch64::LR) {
5039 Out = 30;
5040 return false;
5041 }
5042 }
5043 }
5044
5045 if (check(Reg < First || Reg > RangeEnd, Start,
5046 Twine("expected register in range ") +
5049 return true;
5050 Out = Reg - Base;
5051 return false;
5052}
5053
5054bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5055 const MCParsedAsmOperand &Op2) const {
5056 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5057 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5058
5059 if (AOp1.isVectorList() && AOp2.isVectorList())
5060 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5061 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5062 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5063
5064 if (!AOp1.isReg() || !AOp2.isReg())
5065 return false;
5066
5067 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5068 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5069 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5070
5071 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5072 "Testing equality of non-scalar registers not supported");
5073
5074 // Check if a registers match their sub/super register classes.
5075 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5076 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5077 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5078 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5079 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5080 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5081 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5082 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5083
5084 return false;
5085}
5086
5087/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
5088/// operands.
5089bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
5090 StringRef Name, SMLoc NameLoc,
5093 .Case("beq", "b.eq")
5094 .Case("bne", "b.ne")
5095 .Case("bhs", "b.hs")
5096 .Case("bcs", "b.cs")
5097 .Case("blo", "b.lo")
5098 .Case("bcc", "b.cc")
5099 .Case("bmi", "b.mi")
5100 .Case("bpl", "b.pl")
5101 .Case("bvs", "b.vs")
5102 .Case("bvc", "b.vc")
5103 .Case("bhi", "b.hi")
5104 .Case("bls", "b.ls")
5105 .Case("bge", "b.ge")
5106 .Case("blt", "b.lt")
5107 .Case("bgt", "b.gt")
5108 .Case("ble", "b.le")
5109 .Case("bal", "b.al")
5110 .Case("bnv", "b.nv")
5111 .Default(Name);
5112
5113 // First check for the AArch64-specific .req directive.
5114 if (getTok().is(AsmToken::Identifier) &&
5115 getTok().getIdentifier().lower() == ".req") {
5116 parseDirectiveReq(Name, NameLoc);
5117 // We always return 'error' for this, as we're done with this
5118 // statement and don't need to match the 'instruction."
5119 return true;
5120 }
5121
5122 // Create the leading tokens for the mnemonic, split by '.' characters.
5123 size_t Start = 0, Next = Name.find('.');
5124 StringRef Head = Name.slice(Start, Next);
5125
5126 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5127 // the SYS instruction.
5128 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5129 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5130 return parseSysAlias(Head, NameLoc, Operands);
5131
5132 // TLBIP instructions are aliases for the SYSP instruction.
5133 if (Head == "tlbip")
5134 return parseSyspAlias(Head, NameLoc, Operands);
5135
5136 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5137 Mnemonic = Head;
5138
5139 // Handle condition codes for a branch mnemonic
5140 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5141 Start = Next;
5142 Next = Name.find('.', Start + 1);
5143 Head = Name.slice(Start + 1, Next);
5144
5145 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5146 (Head.data() - Name.data()));
5147 std::string Suggestion;
5148 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5149 if (CC == AArch64CC::Invalid) {
5150 std::string Msg = "invalid condition code";
5151 if (!Suggestion.empty())
5152 Msg += ", did you mean " + Suggestion + "?";
5153 return Error(SuffixLoc, Msg);
5154 }
5155 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5156 /*IsSuffix=*/true));
5157 Operands.push_back(
5158 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5159 }
5160
5161 // Add the remaining tokens in the mnemonic.
5162 while (Next != StringRef::npos) {
5163 Start = Next;
5164 Next = Name.find('.', Start + 1);
5165 Head = Name.slice(Start, Next);
5166 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5167 (Head.data() - Name.data()) + 1);
5168 Operands.push_back(AArch64Operand::CreateToken(
5169 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5170 }
5171
5172 // Conditional compare instructions have a Condition Code operand, which needs
5173 // to be parsed and an immediate operand created.
5174 bool condCodeFourthOperand =
5175 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5176 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5177 Head == "csinc" || Head == "csinv" || Head == "csneg");
5178
5179 // These instructions are aliases to some of the conditional select
5180 // instructions. However, the condition code is inverted in the aliased
5181 // instruction.
5182 //
5183 // FIXME: Is this the correct way to handle these? Or should the parser
5184 // generate the aliased instructions directly?
5185 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5186 bool condCodeThirdOperand =
5187 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5188
5189 // Read the remaining operands.
5190 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5191
5192 unsigned N = 1;
5193 do {
5194 // Parse and remember the operand.
5195 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5196 (N == 3 && condCodeThirdOperand) ||
5197 (N == 2 && condCodeSecondOperand),
5198 condCodeSecondOperand || condCodeThirdOperand)) {
5199 return true;
5200 }
5201
5202 // After successfully parsing some operands there are three special cases
5203 // to consider (i.e. notional operands not separated by commas). Two are
5204 // due to memory specifiers:
5205 // + An RBrac will end an address for load/store/prefetch
5206 // + An '!' will indicate a pre-indexed operation.
5207 //
5208 // And a further case is '}', which ends a group of tokens specifying the
5209 // SME accumulator array 'ZA' or tile vector, i.e.
5210 //
5211 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5212 //
5213 // It's someone else's responsibility to make sure these tokens are sane
5214 // in the given context!
5215
5216 if (parseOptionalToken(AsmToken::RBrac))
5217 Operands.push_back(
5218 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5219 if (parseOptionalToken(AsmToken::Exclaim))
5220 Operands.push_back(
5221 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5222 if (parseOptionalToken(AsmToken::RCurly))
5223 Operands.push_back(
5224 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5225
5226 ++N;
5227 } while (parseOptionalToken(AsmToken::Comma));
5228 }
5229
5230 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5231 return true;
5232
5233 return false;
5234}
5235
5236static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5237 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5238 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5239 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5240 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5241 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5242 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5243 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5244}
5245
5246// FIXME: This entire function is a giant hack to provide us with decent
5247// operand range validation/diagnostics until TableGen/MC can be extended
5248// to support autogeneration of this kind of validation.
5249bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5251 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5252 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5253
5254 // A prefix only applies to the instruction following it. Here we extract
5255 // prefix information for the next instruction before validating the current
5256 // one so that in the case of failure we don't erronously continue using the
5257 // current prefix.
5258 PrefixInfo Prefix = NextPrefix;
5259 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5260
5261 // Before validating the instruction in isolation we run through the rules
5262 // applicable when it follows a prefix instruction.
5263 // NOTE: brk & hlt can be prefixed but require no additional validation.
5264 if (Prefix.isActive() &&
5265 (Inst.getOpcode() != AArch64::BRK) &&
5266 (Inst.getOpcode() != AArch64::HLT)) {
5267
5268 // Prefixed intructions must have a destructive operand.
5271 return Error(IDLoc, "instruction is unpredictable when following a"
5272 " movprfx, suggest replacing movprfx with mov");
5273
5274 // Destination operands must match.
5275 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5276 return Error(Loc[0], "instruction is unpredictable when following a"
5277 " movprfx writing to a different destination");
5278
5279 // Destination operand must not be used in any other location.
5280 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5281 if (Inst.getOperand(i).isReg() &&
5282 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5283 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5284 return Error(Loc[0], "instruction is unpredictable when following a"
5285 " movprfx and destination also used as non-destructive"
5286 " source");
5287 }
5288
5289 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5290 if (Prefix.isPredicated()) {
5291 int PgIdx = -1;
5292
5293 // Find the instructions general predicate.
5294 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5295 if (Inst.getOperand(i).isReg() &&
5296 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5297 PgIdx = i;
5298 break;
5299 }
5300
5301 // Instruction must be predicated if the movprfx is predicated.
5302 if (PgIdx == -1 ||
5304 return Error(IDLoc, "instruction is unpredictable when following a"
5305 " predicated movprfx, suggest using unpredicated movprfx");
5306
5307 // Instruction must use same general predicate as the movprfx.
5308 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5309 return Error(IDLoc, "instruction is unpredictable when following a"
5310 " predicated movprfx using a different general predicate");
5311
5312 // Instruction element type must match the movprfx.
5313 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5314 return Error(IDLoc, "instruction is unpredictable when following a"
5315 " predicated movprfx with a different element size");
5316 }
5317 }
5318
5319 // Check for indexed addressing modes w/ the base register being the
5320 // same as a destination/source register or pair load where
5321 // the Rt == Rt2. All of those are undefined behaviour.
5322 switch (Inst.getOpcode()) {
5323 case AArch64::LDPSWpre:
5324 case AArch64::LDPWpost:
5325 case AArch64::LDPWpre:
5326 case AArch64::LDPXpost:
5327 case AArch64::LDPXpre: {
5328 unsigned Rt = Inst.getOperand(1).getReg();
5329 unsigned Rt2 = Inst.getOperand(2).getReg();
5330 unsigned Rn = Inst.getOperand(3).getReg();
5331 if (RI->isSubRegisterEq(Rn, Rt))
5332 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5333 "is also a destination");
5334 if (RI->isSubRegisterEq(Rn, Rt2))
5335 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5336 "is also a destination");
5337 [[fallthrough]];
5338 }
5339 case AArch64::LDR_ZA:
5340 case AArch64::STR_ZA: {
5341 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5342 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5343 return Error(Loc[1],
5344 "unpredictable instruction, immediate and offset mismatch.");
5345 break;
5346 }
5347 case AArch64::LDPDi:
5348 case AArch64::LDPQi:
5349 case AArch64::LDPSi:
5350 case AArch64::LDPSWi:
5351 case AArch64::LDPWi:
5352 case AArch64::LDPXi: {
5353 unsigned Rt = Inst.getOperand(0).getReg();
5354 unsigned Rt2 = Inst.getOperand(1).getReg();
5355 if (Rt == Rt2)
5356 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5357 break;
5358 }
5359 case AArch64::LDPDpost:
5360 case AArch64::LDPDpre:
5361 case AArch64::LDPQpost:
5362 case AArch64::LDPQpre:
5363 case AArch64::LDPSpost:
5364 case AArch64::LDPSpre:
5365 case AArch64::LDPSWpost: {
5366 unsigned Rt = Inst.getOperand(1).getReg();
5367 unsigned Rt2 = Inst.getOperand(2).getReg();
5368 if (Rt == Rt2)
5369 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5370 break;
5371 }
5372 case AArch64::STPDpost:
5373 case AArch64::STPDpre:
5374 case AArch64::STPQpost:
5375 case AArch64::STPQpre:
5376 case AArch64::STPSpost:
5377 case AArch64::STPSpre:
5378 case AArch64::STPWpost:
5379 case AArch64::STPWpre:
5380 case AArch64::STPXpost:
5381 case AArch64::STPXpre: {
5382 unsigned Rt = Inst.getOperand(1).getReg();
5383 unsigned Rt2 = Inst.getOperand(2).getReg();
5384 unsigned Rn = Inst.getOperand(3).getReg();
5385 if (RI->isSubRegisterEq(Rn, Rt))
5386 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5387 "is also a source");
5388 if (RI->isSubRegisterEq(Rn, Rt2))
5389 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5390 "is also a source");
5391 break;
5392 }
5393 case AArch64::LDRBBpre:
5394 case AArch64::LDRBpre:
5395 case AArch64::LDRHHpre:
5396 case AArch64::LDRHpre:
5397 case AArch64::LDRSBWpre:
5398 case AArch64::LDRSBXpre:
5399 case AArch64::LDRSHWpre:
5400 case AArch64::LDRSHXpre:
5401 case AArch64::LDRSWpre:
5402 case AArch64::LDRWpre:
5403 case AArch64::LDRXpre:
5404 case AArch64::LDRBBpost:
5405 case AArch64::LDRBpost:
5406 case AArch64::LDRHHpost:
5407 case AArch64::LDRHpost:
5408 case AArch64::LDRSBWpost:
5409 case AArch64::LDRSBXpost:
5410 case AArch64::LDRSHWpost:
5411 case AArch64::LDRSHXpost:
5412 case AArch64::LDRSWpost:
5413 case AArch64::LDRWpost:
5414 case AArch64::LDRXpost: {
5415 unsigned Rt = Inst.getOperand(1).getReg();
5416 unsigned Rn = Inst.getOperand(2).getReg();
5417 if (RI->isSubRegisterEq(Rn, Rt))
5418 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5419 "is also a source");
5420 break;
5421 }
5422 case AArch64::STRBBpost:
5423 case AArch64::STRBpost:
5424 case AArch64::STRHHpost:
5425 case AArch64::STRHpost:
5426 case AArch64::STRWpost:
5427 case AArch64::STRXpost:
5428 case AArch64::STRBBpre:
5429 case AArch64::STRBpre:
5430 case AArch64::STRHHpre:
5431 case AArch64::STRHpre:
5432 case AArch64::STRWpre:
5433 case AArch64::STRXpre: {
5434 unsigned Rt = Inst.getOperand(1).getReg();
5435 unsigned Rn = Inst.getOperand(2).getReg();
5436 if (RI->isSubRegisterEq(Rn, Rt))
5437 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5438 "is also a source");
5439 break;
5440 }
5441 case AArch64::STXRB:
5442 case AArch64::STXRH:
5443 case AArch64::STXRW:
5444 case AArch64::STXRX:
5445 case AArch64::STLXRB:
5446 case AArch64::STLXRH:
5447 case AArch64::STLXRW:
5448 case AArch64::STLXRX: {
5449 unsigned Rs = Inst.getOperand(0).getReg();
5450 unsigned Rt = Inst.getOperand(1).getReg();
5451 unsigned Rn = Inst.getOperand(2).getReg();
5452 if (RI->isSubRegisterEq(Rt, Rs) ||
5453 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5454 return Error(Loc[0],
5455 "unpredictable STXR instruction, status is also a source");
5456 break;
5457 }
5458 case AArch64::STXPW:
5459 case AArch64::STXPX:
5460 case AArch64::STLXPW:
5461 case AArch64::STLXPX: {
5462 unsigned Rs = Inst.getOperand(0).getReg();
5463 unsigned Rt1 = Inst.getOperand(1).getReg();
5464 unsigned Rt2 = Inst.getOperand(2).getReg();
5465 unsigned Rn = Inst.getOperand(3).getReg();
5466 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5467 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5468 return Error(Loc[0],
5469 "unpredictable STXP instruction, status is also a source");
5470 break;
5471 }
5472 case AArch64::LDRABwriteback:
5473 case AArch64::LDRAAwriteback: {
5474 unsigned Xt = Inst.getOperand(0).getReg();
5475 unsigned Xn = Inst.getOperand(1).getReg();
5476 if (Xt == Xn)
5477 return Error(Loc[0],
5478 "unpredictable LDRA instruction, writeback base"
5479 " is also a destination");
5480 break;
5481 }
5482 }
5483
5484 // Check v8.8-A memops instructions.
5485 switch (Inst.getOpcode()) {
5486 case AArch64::CPYFP:
5487 case AArch64::CPYFPWN:
5488 case AArch64::CPYFPRN:
5489 case AArch64::CPYFPN:
5490 case AArch64::CPYFPWT:
5491 case AArch64::CPYFPWTWN:
5492 case AArch64::CPYFPWTRN:
5493 case AArch64::CPYFPWTN:
5494 case AArch64::CPYFPRT:
5495 case AArch64::CPYFPRTWN:
5496 case AArch64::CPYFPRTRN:
5497 case AArch64::CPYFPRTN:
5498 case AArch64::CPYFPT:
5499 case AArch64::CPYFPTWN:
5500 case AArch64::CPYFPTRN:
5501 case AArch64::CPYFPTN:
5502 case AArch64::CPYFM:
5503 case AArch64::CPYFMWN:
5504 case AArch64::CPYFMRN:
5505 case AArch64::CPYFMN:
5506 case AArch64::CPYFMWT:
5507 case AArch64::CPYFMWTWN:
5508 case AArch64::CPYFMWTRN:
5509 case AArch64::CPYFMWTN:
5510 case AArch64::CPYFMRT:
5511 case AArch64::CPYFMRTWN:
5512 case AArch64::CPYFMRTRN:
5513 case AArch64::CPYFMRTN:
5514 case AArch64::CPYFMT:
5515 case AArch64::CPYFMTWN:
5516 case AArch64::CPYFMTRN:
5517 case AArch64::CPYFMTN:
5518 case AArch64::CPYFE:
5519 case AArch64::CPYFEWN:
5520 case AArch64::CPYFERN:
5521 case AArch64::CPYFEN:
5522 case AArch64::CPYFEWT:
5523 case AArch64::CPYFEWTWN:
5524 case AArch64::CPYFEWTRN:
5525 case AArch64::CPYFEWTN:
5526 case AArch64::CPYFERT:
5527 case AArch64::CPYFERTWN:
5528 case AArch64::CPYFERTRN:
5529 case AArch64::CPYFERTN:
5530 case AArch64::CPYFET:
5531 case AArch64::CPYFETWN:
5532 case AArch64::CPYFETRN:
5533 case AArch64::CPYFETN:
5534 case AArch64::CPYP:
5535 case AArch64::CPYPWN:
5536 case AArch64::CPYPRN:
5537 case AArch64::CPYPN:
5538 case AArch64::CPYPWT:
5539 case AArch64::CPYPWTWN:
5540 case AArch64::CPYPWTRN:
5541 case AArch64::CPYPWTN:
5542 case AArch64::CPYPRT:
5543 case AArch64::CPYPRTWN:
5544 case AArch64::CPYPRTRN:
5545 case AArch64::CPYPRTN:
5546 case AArch64::CPYPT:
5547 case AArch64::CPYPTWN:
5548 case AArch64::CPYPTRN:
5549 case AArch64::CPYPTN:
5550 case AArch64::CPYM:
5551 case AArch64::CPYMWN:
5552 case AArch64::CPYMRN:
5553 case AArch64::CPYMN:
5554 case AArch64::CPYMWT:
5555 case AArch64::CPYMWTWN:
5556 case AArch64::CPYMWTRN:
5557 case AArch64::CPYMWTN:
5558 case AArch64::CPYMRT:
5559 case AArch64::CPYMRTWN:
5560 case AArch64::CPYMRTRN:
5561 case AArch64::CPYMRTN:
5562 case AArch64::CPYMT:
5563 case AArch64::CPYMTWN:
5564 case AArch64::CPYMTRN:
5565 case AArch64::CPYMTN:
5566 case AArch64::CPYE:
5567 case AArch64::CPYEWN:
5568 case AArch64::CPYERN:
5569 case AArch64::CPYEN:
5570 case AArch64::CPYEWT:
5571 case AArch64::CPYEWTWN:
5572 case AArch64::CPYEWTRN:
5573 case AArch64::CPYEWTN:
5574 case AArch64::CPYERT:
5575 case AArch64::CPYERTWN:
5576 case AArch64::CPYERTRN:
5577 case AArch64::CPYERTN:
5578 case AArch64::CPYET:
5579 case AArch64::CPYETWN:
5580 case AArch64::CPYETRN:
5581 case AArch64::CPYETN: {
5582 unsigned Xd_wb = Inst.getOperand(0).getReg();
5583 unsigned Xs_wb = Inst.getOperand(1).getReg();
5584 unsigned Xn_wb = Inst.getOperand(2).getReg();
5585 unsigned Xd = Inst.getOperand(3).getReg();
5586 unsigned Xs = Inst.getOperand(4).getReg();
5587 unsigned Xn = Inst.getOperand(5).getReg();
5588 if (Xd_wb != Xd)
5589 return Error(Loc[0],
5590 "invalid CPY instruction, Xd_wb and Xd do not match");
5591 if (Xs_wb != Xs)
5592 return Error(Loc[0],
5593 "invalid CPY instruction, Xs_wb and Xs do not match");
5594 if (Xn_wb != Xn)
5595 return Error(Loc[0],
5596 "invalid CPY instruction, Xn_wb and Xn do not match");
5597 if (Xd == Xs)
5598 return Error(Loc[0], "invalid CPY instruction, destination and source"
5599 " registers are the same");
5600 if (Xd == Xn)
5601 return Error(Loc[0], "invalid CPY instruction, destination and size"
5602 " registers are the same");
5603 if (Xs == Xn)
5604 return Error(Loc[0], "invalid CPY instruction, source and size"
5605 " registers are the same");
5606 break;
5607 }
5608 case AArch64::SETP:
5609 case AArch64::SETPT:
5610 case AArch64::SETPN:
5611 case AArch64::SETPTN:
5612 case AArch64::SETM:
5613 case AArch64::SETMT:
5614 case AArch64::SETMN:
5615 case AArch64::SETMTN:
5616 case AArch64::SETE:
5617 case AArch64::SETET:
5618 case AArch64::SETEN:
5619 case AArch64::SETETN:
5620 case AArch64::SETGP:
5621 case AArch64::SETGPT:
5622 case AArch64::SETGPN:
5623 case AArch64::SETGPTN:
5624 case AArch64::SETGM:
5625 case AArch64::SETGMT:
5626 case AArch64::SETGMN:
5627 case AArch64::SETGMTN:
5628 case AArch64::MOPSSETGE:
5629 case AArch64::MOPSSETGET:
5630 case AArch64::MOPSSETGEN:
5631 case AArch64::MOPSSETGETN: {
5632 unsigned Xd_wb = Inst.getOperand(0).getReg();
5633 unsigned Xn_wb = Inst.getOperand(1).getReg();
5634 unsigned Xd = Inst.getOperand(2).getReg();
5635 unsigned Xn = Inst.getOperand(3).getReg();
5636 unsigned Xm = Inst.getOperand(4).getReg();
5637 if (Xd_wb != Xd)
5638 return Error(Loc[0],
5639 "invalid SET instruction, Xd_wb and Xd do not match");
5640 if (Xn_wb != Xn)
5641 return Error(Loc[0],
5642 "invalid SET instruction, Xn_wb and Xn do not match");
5643 if (Xd == Xn)
5644 return Error(Loc[0], "invalid SET instruction, destination and size"
5645 " registers are the same");
5646 if (Xd == Xm)
5647 return Error(Loc[0], "invalid SET instruction, destination and source"
5648 " registers are the same");
5649 if (Xn == Xm)
5650 return Error(Loc[0], "invalid SET instruction, source and size"
5651 " registers are the same");
5652 break;
5653 }
5654 }
5655
5656 // Now check immediate ranges. Separate from the above as there is overlap
5657 // in the instructions being checked and this keeps the nested conditionals
5658 // to a minimum.
5659 switch (Inst.getOpcode()) {
5660 case AArch64::ADDSWri:
5661 case AArch64::ADDSXri:
5662 case AArch64::ADDWri:
5663 case AArch64::ADDXri:
5664 case AArch64::SUBSWri:
5665 case AArch64::SUBSXri:
5666 case AArch64::SUBWri:
5667 case AArch64::SUBXri: {
5668 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5669 // some slight duplication here.
5670 if (Inst.getOperand(2).isExpr()) {
5671 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5672 AArch64MCExpr::VariantKind ELFRefKind;
5673 MCSymbolRefExpr::VariantKind DarwinRefKind;
5674 int64_t Addend;
5675 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5676
5677 // Only allow these with ADDXri.
5678 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5679 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5680 Inst.getOpcode() == AArch64::ADDXri)
5681 return false;
5682
5683 // Only allow these with ADDXri/ADDWri
5684 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5685 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5686 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5687 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5688 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5689 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5690 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5691 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5692 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5693 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5694 (Inst.getOpcode() == AArch64::ADDXri ||
5695 Inst.getOpcode() == AArch64::ADDWri))
5696 return false;
5697
5698 // Don't allow symbol refs in the immediate field otherwise
5699 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5700 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5701 // 'cmp w0, 'borked')
5702 return Error(Loc.back(), "invalid immediate expression");
5703 }
5704 // We don't validate more complex expressions here
5705 }
5706 return false;
5707 }
5708 default:
5709 return false;
5710 }
5711}
5712
// Tail of the forward declaration of AArch64MnemonicSpellCheck (its first
// line appears to have been elided by the extraction -- confirm against the
// original source). It is invoked below under Match_MnemonicFail to suggest
// a close mnemonic, filtered by the currently available subtarget features.
5714 const FeatureBitset &FBS,
5715 unsigned VariantID = 0);
5716
// Translate a matcher failure code (Match_*) into a human-readable
// diagnostic reported at Loc. Every case funnels through Error(...), whose
// result is returned to the caller, so exactly one message is emitted per
// failed match.
// NOTE(review): the remaining parameter lines of this signature (the ones
// declaring ErrorInfo and the operand vector) appear to have been elided by
// the extraction -- the body indexes Operands[ErrorInfo] below; confirm
// against the original source.
5717bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5720 switch (ErrCode) {
  // Tied-operand failure: message depends on whether a register list or a
  // (possibly sub-/super-) register was expected to match the destination.
5721 case Match_InvalidTiedOperand: {
5722 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5723 if (Op.isVectorList())
5724 return Error(Loc, "operand must match destination register list");
5725
5726 assert(Op.isReg() && "Unexpected operand type");
5727 switch (Op.getRegEqualityTy()) {
5728 case RegConstraintEqualityTy::EqualsSubReg:
5729 return Error(Loc, "operand must be 64-bit form of destination register");
5730 case RegConstraintEqualityTy::EqualsSuperReg:
5731 return Error(Loc, "operand must be 32-bit form of destination register");
5732 case RegConstraintEqualityTy::EqualsReg:
5733 return Error(Loc, "operand must match destination register");
5734 }
5735 llvm_unreachable("Unknown RegConstraintEqualityTy");
5736 }
  // Generic failures plus shifted/extended-register operand diagnostics.
5737 case Match_MissingFeature:
5738 return Error(Loc,
5739 "instruction requires a CPU feature not currently enabled");
5740 case Match_InvalidOperand:
5741 return Error(Loc, "invalid operand for instruction");
5742 case Match_InvalidSuffix:
5743 return Error(Loc, "invalid type suffix for instruction");
5744 case Match_InvalidCondCode:
5745 return Error(Loc, "expected AArch64 condition code");
5746 case Match_AddSubRegExtendSmall:
5747 return Error(Loc,
5748 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5749 case Match_AddSubRegExtendLarge:
5750 return Error(Loc,
5751 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5752 case Match_AddSubSecondSource:
5753 return Error(Loc,
5754 "expected compatible register, symbol or integer in range [0, 4095]");
5755 case Match_LogicalSecondSource:
5756 return Error(Loc, "expected compatible register or logical immediate");
5757 case Match_InvalidMovImm32Shift:
5758 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5759 case Match_InvalidMovImm64Shift:
5760 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5761 case Match_AddSubRegShift32:
5762 return Error(Loc,
5763 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5764 case Match_AddSubRegShift64:
5765 return Error(Loc,
5766 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5767 case Match_InvalidFPImm:
5768 return Error(Loc,
5769 "expected compatible register or floating-point constant");
  // Memory-offset immediates: each message spells out the required
  // multiple (scale) and signed/unsigned range for that addressing form.
5770 case Match_InvalidMemoryIndexedSImm6:
5771 return Error(Loc, "index must be an integer in range [-32, 31].");
5772 case Match_InvalidMemoryIndexedSImm5:
5773 return Error(Loc, "index must be an integer in range [-16, 15].");
5774 case Match_InvalidMemoryIndexed1SImm4:
5775 return Error(Loc, "index must be an integer in range [-8, 7].");
5776 case Match_InvalidMemoryIndexed2SImm4:
5777 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5778 case Match_InvalidMemoryIndexed3SImm4:
5779 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5780 case Match_InvalidMemoryIndexed4SImm4:
5781 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5782 case Match_InvalidMemoryIndexed16SImm4:
5783 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5784 case Match_InvalidMemoryIndexed32SImm4:
5785 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5786 case Match_InvalidMemoryIndexed1SImm6:
5787 return Error(Loc, "index must be an integer in range [-32, 31].");
5788 case Match_InvalidMemoryIndexedSImm8:
5789 return Error(Loc, "index must be an integer in range [-128, 127].");
5790 case Match_InvalidMemoryIndexedSImm9:
5791 return Error(Loc, "index must be an integer in range [-256, 255].");
5792 case Match_InvalidMemoryIndexed16SImm9:
5793 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5794 case Match_InvalidMemoryIndexed8SImm10:
5795 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5796 case Match_InvalidMemoryIndexed4SImm7:
5797 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5798 case Match_InvalidMemoryIndexed8SImm7:
5799 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5800 case Match_InvalidMemoryIndexed16SImm7:
5801 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5802 case Match_InvalidMemoryIndexed8UImm5:
5803 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5804 case Match_InvalidMemoryIndexed8UImm3:
5805 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5806 case Match_InvalidMemoryIndexed4UImm5:
5807 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5808 case Match_InvalidMemoryIndexed2UImm5:
5809 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5810 case Match_InvalidMemoryIndexed8UImm6:
5811 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5812 case Match_InvalidMemoryIndexed16UImm6:
5813 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5814 case Match_InvalidMemoryIndexed4UImm6:
5815 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5816 case Match_InvalidMemoryIndexed2UImm6:
5817 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5818 case Match_InvalidMemoryIndexed1UImm6:
5819 return Error(Loc, "index must be in range [0, 63].");
  // Register-offset addressing: W-register (uxtw/sxtw) and X-register
  // (lsl/sxtx) extends, one case per access size.
5820 case Match_InvalidMemoryWExtend8:
5821 return Error(Loc,
5822 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5823 case Match_InvalidMemoryWExtend16:
5824 return Error(Loc,
5825 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5826 case Match_InvalidMemoryWExtend32:
5827 return Error(Loc,
5828 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5829 case Match_InvalidMemoryWExtend64:
5830 return Error(Loc,
5831 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5832 case Match_InvalidMemoryWExtend128:
5833 return Error(Loc,
5834 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5835 case Match_InvalidMemoryXExtend8:
5836 return Error(Loc,
5837 "expected 'lsl' or 'sxtx' with optional shift of #0");
5838 case Match_InvalidMemoryXExtend16:
5839 return Error(Loc,
5840 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5841 case Match_InvalidMemoryXExtend32:
5842 return Error(Loc,
5843 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5844 case Match_InvalidMemoryXExtend64:
5845 return Error(Loc,
5846 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5847 case Match_InvalidMemoryXExtend128:
5848 return Error(Loc,
5849 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5850 case Match_InvalidMemoryIndexed1:
5851 return Error(Loc, "index must be an integer in range [0, 4095].");
5852 case Match_InvalidMemoryIndexed2:
5853 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5854 case Match_InvalidMemoryIndexed4:
5855 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5856 case Match_InvalidMemoryIndexed8:
5857 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5858 case Match_InvalidMemoryIndexed16:
5859 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-range operands.
5860 case Match_InvalidImm0_0:
5861 return Error(Loc, "immediate must be 0.");
5862 case Match_InvalidImm0_1:
5863 return Error(Loc, "immediate must be an integer in range [0, 1].");
5864 case Match_InvalidImm0_3:
5865 return Error(Loc, "immediate must be an integer in range [0, 3].");
5866 case Match_InvalidImm0_7:
5867 return Error(Loc, "immediate must be an integer in range [0, 7].");
5868 case Match_InvalidImm0_15:
5869 return Error(Loc, "immediate must be an integer in range [0, 15].");
5870 case Match_InvalidImm0_31:
5871 return Error(Loc, "immediate must be an integer in range [0, 31].");
5872 case Match_InvalidImm0_63:
5873 return Error(Loc, "immediate must be an integer in range [0, 63].");
5874 case Match_InvalidImm0_127:
5875 return Error(Loc, "immediate must be an integer in range [0, 127].");
5876 case Match_InvalidImm0_255:
5877 return Error(Loc, "immediate must be an integer in range [0, 255].");
5878 case Match_InvalidImm0_65535:
5879 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5880 case Match_InvalidImm1_8:
5881 return Error(Loc, "immediate must be an integer in range [1, 8].");
5882 case Match_InvalidImm1_16:
5883 return Error(Loc, "immediate must be an integer in range [1, 16].");
5884 case Match_InvalidImm1_32:
5885 return Error(Loc, "immediate must be an integer in range [1, 32].");
5886 case Match_InvalidImm1_64:
5887 return Error(Loc, "immediate must be an integer in range [1, 64].");
  // Vector-select offset ranges written as an <immf>:<imml> immediate pair.
5888 case Match_InvalidMemoryIndexedRange2UImm0:
5889 return Error(Loc, "vector select offset must be the immediate range 0:1.");
5890 case Match_InvalidMemoryIndexedRange2UImm1:
5891 return Error(Loc, "vector select offset must be an immediate range of the "
5892 "form <immf>:<imml>, where the first "
5893 "immediate is a multiple of 2 in the range [0, 2], and "
5894 "the second immediate is immf + 1.");
5895 case Match_InvalidMemoryIndexedRange2UImm2:
5896 case Match_InvalidMemoryIndexedRange2UImm3:
5897 return Error(
5898 Loc,
5899 "vector select offset must be an immediate range of the form "
5900 "<immf>:<imml>, "
5901 "where the first immediate is a multiple of 2 in the range [0, 6] or "
5902 "[0, 14] "
5903 "depending on the instruction, and the second immediate is immf + 1.");
5904 case Match_InvalidMemoryIndexedRange4UImm0:
5905 return Error(Loc, "vector select offset must be the immediate range 0:3.");
5906 case Match_InvalidMemoryIndexedRange4UImm1:
5907 case Match_InvalidMemoryIndexedRange4UImm2:
5908 return Error(
5909 Loc,
5910 "vector select offset must be an immediate range of the form "
5911 "<immf>:<imml>, "
5912 "where the first immediate is a multiple of 4 in the range [0, 4] or "
5913 "[0, 12] "
5914 "depending on the instruction, and the second immediate is immf + 3.");
  // SVE add/sub and cpy immediates: a byte value, optionally shifted by 8
  // (expressed here as a multiple-of-256 alternative).
5915 case Match_InvalidSVEAddSubImm8:
5916 return Error(Loc, "immediate must be an integer in range [0, 255]"
5917 " with a shift amount of 0");
5918 case Match_InvalidSVEAddSubImm16:
5919 case Match_InvalidSVEAddSubImm32:
5920 case Match_InvalidSVEAddSubImm64:
5921 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5922 "multiple of 256 in range [256, 65280]");
5923 case Match_InvalidSVECpyImm8:
5924 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5925 " with a shift amount of 0");
5926 case Match_InvalidSVECpyImm16:
5927 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5928 "multiple of 256 in range [-32768, 65280]");
5929 case Match_InvalidSVECpyImm32:
5930 case Match_InvalidSVECpyImm64:
5931 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5932 "multiple of 256 in range [-32768, 32512]");
  // Vector-lane index diagnostics.
5933 case Match_InvalidIndexRange0_0:
5934 return Error(Loc, "expected lane specifier '[0]'");
5935 case Match_InvalidIndexRange1_1:
5936 return Error(Loc, "expected lane specifier '[1]'");
5937 case Match_InvalidIndexRange0_15:
5938 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5939 case Match_InvalidIndexRange0_7:
5940 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5941 case Match_InvalidIndexRange0_3:
5942 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5943 case Match_InvalidIndexRange0_1:
5944 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5945 case Match_InvalidSVEIndexRange0_63:
5946 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5947 case Match_InvalidSVEIndexRange0_31:
5948 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5949 case Match_InvalidSVEIndexRange0_15:
5950 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5951 case Match_InvalidSVEIndexRange0_7:
5952 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5953 case Match_InvalidSVEIndexRange0_3:
5954 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5955 case Match_InvalidLabel:
5956 return Error(Loc, "expected label or encodable integer pc offset");
5957 case Match_MRS:
5958 return Error(Loc, "expected readable system register");
5959 case Match_MSR:
5960 case Match_InvalidSVCR:
5961 return Error(Loc, "expected writable system register or pstate");
5962 case Match_InvalidComplexRotationEven:
5963 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5964 case Match_InvalidComplexRotationOdd:
5965 return Error(Loc, "complex rotation must be 90 or 270.");
  // Unknown mnemonic: augment the diagnostic with a spell-check suggestion
  // computed from the token and the currently available subtarget features.
5966 case Match_MnemonicFail: {
5967 std::string Suggestion = AArch64MnemonicSpellCheck(
5968 ((AArch64Operand &)*Operands[0]).getToken(),
5969 ComputeAvailableFeatures(STI->getFeatureBits()));
5970 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5971 }
5972 case Match_InvalidGPR64shifted8:
5973 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5974 case Match_InvalidGPR64shifted16:
5975 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5976 case Match_InvalidGPR64shifted32:
5977 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5978 case Match_InvalidGPR64shifted64:
5979 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5980 case Match_InvalidGPR64shifted128:
5981 return Error(
5982 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5983 case Match_InvalidGPR64NoXZRshifted8:
5984 return Error(Loc, "register must be x0..x30 without shift");
5985 case Match_InvalidGPR64NoXZRshifted16:
5986 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5987 case Match_InvalidGPR64NoXZRshifted32:
5988 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5989 case Match_InvalidGPR64NoXZRshifted64:
5990 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5991 case Match_InvalidGPR64NoXZRshifted128:
5992 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
  // SVE scatter/gather addressing: required Z-register element size plus
  // shift/extend form, one pair of cases (uxtw/sxtw) per variant.
5993 case Match_InvalidZPR32UXTW8:
5994 case Match_InvalidZPR32SXTW8:
5995 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5996 case Match_InvalidZPR32UXTW16:
5997 case Match_InvalidZPR32SXTW16:
5998 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5999 case Match_InvalidZPR32UXTW32:
6000 case Match_InvalidZPR32SXTW32:
6001 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6002 case Match_InvalidZPR32UXTW64:
6003 case Match_InvalidZPR32SXTW64:
6004 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6005 case Match_InvalidZPR64UXTW8:
6006 case Match_InvalidZPR64SXTW8:
6007 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6008 case Match_InvalidZPR64UXTW16:
6009 case Match_InvalidZPR64SXTW16:
6010 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6011 case Match_InvalidZPR64UXTW32:
6012 case Match_InvalidZPR64SXTW32:
6013 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6014 case Match_InvalidZPR64UXTW64:
6015 case Match_InvalidZPR64SXTW64:
6016 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6017 case Match_InvalidZPR32LSL8:
6018 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6019 case Match_InvalidZPR32LSL16:
6020 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6021 case Match_InvalidZPR32LSL32:
6022 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6023 case Match_InvalidZPR32LSL64:
6024 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6025 case Match_InvalidZPR64LSL8:
6026 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6027 case Match_InvalidZPR64LSL16:
6028 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6029 case Match_InvalidZPR64LSL32:
6030 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6031 case Match_InvalidZPR64LSL64:
6032 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  // SVE Z-register operand diagnostics (element width, restricted ranges).
6033 case Match_InvalidZPR0:
6034 return Error(Loc, "expected register without element width suffix");
6035 case Match_InvalidZPR8:
6036 case Match_InvalidZPR16:
6037 case Match_InvalidZPR32:
6038 case Match_InvalidZPR64:
6039 case Match_InvalidZPR128:
6040 return Error(Loc, "invalid element width");
6041 case Match_InvalidZPR_3b8:
6042 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6043 case Match_InvalidZPR_3b16:
6044 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6045 case Match_InvalidZPR_3b32:
6046 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6047 case Match_InvalidZPR_4b8:
6048 return Error(Loc,
6049 "Invalid restricted vector register, expected z0.b..z15.b");
6050 case Match_InvalidZPR_4b16:
6051 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6052 case Match_InvalidZPR_4b32:
6053 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6054 case Match_InvalidZPR_4b64:
6055 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  // SVE predicate (P/PN) register diagnostics.
6056 case Match_InvalidSVEPattern:
6057 return Error(Loc, "invalid predicate pattern");
6058 case Match_InvalidSVEPPRorPNRAnyReg:
6059 case Match_InvalidSVEPPRorPNRBReg:
6060 case Match_InvalidSVEPredicateAnyReg:
6061 case Match_InvalidSVEPredicateBReg:
6062 case Match_InvalidSVEPredicateHReg:
6063 case Match_InvalidSVEPredicateSReg:
6064 case Match_InvalidSVEPredicateDReg:
6065 return Error(Loc, "invalid predicate register.");
6066 case Match_InvalidSVEPredicate3bAnyReg:
6067 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6068 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6069 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6070 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6071 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6072 return Error(Loc, "Invalid predicate register, expected PN in range "
6073 "pn8..pn15 with element suffix.");
6074 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6075 return Error(Loc, "invalid restricted predicate-as-counter register "
6076 "expected pn8..pn15");
6077 case Match_InvalidSVEPNPredicateBReg:
6078 case Match_InvalidSVEPNPredicateHReg:
6079 case Match_InvalidSVEPNPredicateSReg:
6080 case Match_InvalidSVEPNPredicateDReg:
6081 return Error(Loc, "Invalid predicate register, expected PN in range "
6082 "pn0..pn15 with element suffix.")
6083 case Match_InvalidSVEVecLenSpecifier:
6084 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6085 case Match_InvalidSVEPredicateListMul2x8:
6086 case Match_InvalidSVEPredicateListMul2x16:
6087 case Match_InvalidSVEPredicateListMul2x32:
6088 case Match_InvalidSVEPredicateListMul2x64:
6089 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6090 "predicate registers, where the first vector is a multiple of 2 "
6091 "and with correct element type");
6092 case Match_InvalidSVEExactFPImmOperandHalfOne:
6093 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6094 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6095 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6096 case Match_InvalidSVEExactFPImmOperandZeroOne:
6097 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  // SME matrix (ZA tile / tile-vector) operand diagnostics.
6098 case Match_InvalidMatrixTileVectorH8:
6099 case Match_InvalidMatrixTileVectorV8:
6100 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6101 case Match_InvalidMatrixTileVectorH16:
6102 case Match_InvalidMatrixTileVectorV16:
6103 return Error(Loc,
6104 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6105 case Match_InvalidMatrixTileVectorH32:
6106 case Match_InvalidMatrixTileVectorV32:
6107 return Error(Loc,
6108 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6109 case Match_InvalidMatrixTileVectorH64:
6110 case Match_InvalidMatrixTileVectorV64:
6111 return Error(Loc,
6112 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6113 case Match_InvalidMatrixTileVectorH128:
6114 case Match_InvalidMatrixTileVectorV128:
6115 return Error(Loc,
6116 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6117 case Match_InvalidMatrixTile32:
6118 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6119 case Match_InvalidMatrixTile64:
6120 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6121 case Match_InvalidMatrix:
6122 return Error(Loc, "invalid matrix operand, expected za");
6123 case Match_InvalidMatrix8:
6124 return Error(Loc, "invalid matrix operand, expected suffix .b")
6125 case Match_InvalidMatrix16:
6126 return Error(Loc, "invalid matrix operand, expected suffix .h");
6127 case Match_InvalidMatrix32:
6128 return Error(Loc, "invalid matrix operand, expected suffix .s");
6129 case Match_InvalidMatrix64:
6130 return Error(Loc, "invalid matrix operand, expected suffix .d");
6131 case Match_InvalidMatrixIndexGPR32_12_15:
6132 return Error(Loc, "operand must be a register in range [w12, w15]");
6133 case Match_InvalidMatrixIndexGPR32_8_11:
6134 return Error(Loc, "operand must be a register in range [w8, w11]");
  // Multi-vector (consecutive and strided) register-list diagnostics.
6135 case Match_InvalidSVEVectorListMul2x8:
6136 case Match_InvalidSVEVectorListMul2x16:
6137 case Match_InvalidSVEVectorListMul2x32:
6138 case Match_InvalidSVEVectorListMul2x64:
6139 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6140 "SVE vectors, where the first vector is a multiple of 2 "
6141 "and with matching element types");
6142 case Match_InvalidSVEVectorListMul4x8:
6143 case Match_InvalidSVEVectorListMul4x16:
6144 case Match_InvalidSVEVectorListMul4x32:
6145 case Match_InvalidSVEVectorListMul4x64:
6146 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6147 "SVE vectors, where the first vector is a multiple of 4 "
6148 "and with matching element types");
6149 case Match_InvalidLookupTable:
6150 return Error(Loc, "Invalid lookup table, expected zt0");
6151 case Match_InvalidSVEVectorListStrided2x8:
6152 case Match_InvalidSVEVectorListStrided2x16:
6153 case Match_InvalidSVEVectorListStrided2x32:
6154 case Match_InvalidSVEVectorListStrided2x64:
6155 return Error(
6156 Loc,
6157 "Invalid vector list, expected list with each SVE vector in the list "
6158 "8 registers apart, and the first register in the range [z0, z7] or "
6159 "[z16, z23] and with correct element type");
6160 case Match_InvalidSVEVectorListStrided4x8:
6161 case Match_InvalidSVEVectorListStrided4x16:
6162 case Match_InvalidSVEVectorListStrided4x32:
6163 case Match_InvalidSVEVectorListStrided4x64:
6164 return Error(
6165 Loc,
6166 "Invalid vector list, expected list with each SVE vector in the list "
6167 "4 registers apart, and the first register in the range [z0, z3] or "
6168 "[z16, z19] and with correct element type");
6169 case Match_AddSubLSLImm3ShiftLarge:
6170 return Error(Loc,
6171 "expected 'lsl' with optional integer in range [0, 7]");
  // Any unlisted error code indicates a matcher bug, not bad user input.
6172 default:
6173 llvm_unreachable("unexpected error code!");
6174 }
6175}
6176
// Forward declaration: maps a subtarget feature bit value to a printable
// feature name (definition supplied elsewhere -- presumably the generated
// matcher; confirm). Used when reporting missing-feature match failures.
6177static const char *getSubtargetFeatureName(uint64_t Val);
6178
6179bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6181 MCStreamer &Out,
6183 bool MatchingInlineAsm) {
6184 assert(!Operands.empty() && "Unexpect empty operand list!");
6185 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6186 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6187
6188 StringRef Tok = Op.getToken();
6189 unsigned NumOperands = Operands.size();
6190
6191 if (NumOperands == 4 && Tok == "lsl") {
6192 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6193 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6194 if (Op2.isScalarReg() && Op3.isImm()) {
6195 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6196 if (Op3CE) {
6197 uint64_t Op3Val = Op3CE->getValue();
6198 uint64_t NewOp3Val = 0;
6199 uint64_t NewOp4Val = 0;
6200 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6201 Op2.getReg())) {
6202 NewOp3Val = (32 - Op3Val) & 0x1f;
6203 NewOp4Val = 31 - Op3Val;
6204 } else {
6205 NewOp3Val = (64 - Op3Val) & 0x3f;
6206 NewOp4Val = 63 - Op3Val;
6207 }
6208
6209 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6210 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6211
6212 Operands[0] =
6213 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6214 Operands.push_back(AArch64Operand::CreateImm(
6215 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6216 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6217 Op3.getEndLoc(), getContext());
6218 }
6219 }
6220 } else if (NumOperands == 4 && Tok == "bfc") {
6221 // FIXME: Horrible hack to handle BFC->BFM alias.
6222 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6223 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6224 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6225
6226 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6227 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6228 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6229
6230 if (LSBCE && WidthCE) {
6231 uint64_t LSB = LSBCE->getValue();
6232 uint64_t Width = WidthCE->getValue();
6233
6234 uint64_t RegWidth = 0;
6235 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6236 Op1.getReg()))
6237 RegWidth = 64;
6238 else
6239 RegWidth = 32;
6240
6241 if (LSB >= RegWidth)
6242 return Error(LSBOp.getStartLoc(),
6243 "expected integer in range [0, 31]");
6244 if (Width < 1 || Width > RegWidth)
6245 return Error(WidthOp.getStartLoc(),
6246 "expected integer in range [1, 32]");
6247
6248 uint64_t ImmR = 0;
6249 if (RegWidth == 32)
6250 ImmR = (32 - LSB) & 0x1f;
6251 else
6252 ImmR = (64 - LSB) & 0x3f;
6253
6254 uint64_t ImmS = Width - 1;
6255
6256 if (ImmR != 0 && ImmS >= ImmR)
6257 return Error(WidthOp.getStartLoc(),
6258 "requested insert overflows register");
6259
6260 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6261 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6262 Operands[0] =
6263 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6264 Operands[2] = AArch64Operand::CreateReg(
6265 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6266 SMLoc(), SMLoc(), getContext());
6267 Operands[3] = AArch64Operand::CreateImm(
6268 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6269 Operands.emplace_back(
6270 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6271 WidthOp.getEndLoc(), getContext()));
6272 }
6273 }
6274 } else if (NumOperands == 5) {
6275 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6276 // UBFIZ -> UBFM aliases.
6277 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6278 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6279 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6280 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6281
6282 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6283 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6284 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6285
6286 if (Op3CE && Op4CE) {
6287 uint64_t Op3Val = Op3CE->getValue();
6288 uint64_t Op4Val = Op4CE->getValue();
6289
6290 uint64_t RegWidth = 0;
6291 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6292 Op1.getReg()))
6293 RegWidth = 64;
6294 else
6295 RegWidth = 32;
6296
6297 if (Op3Val >= RegWidth)
6298 return Error(Op3.getStartLoc(),
6299 "expected integer in range [0, 31]");
6300 if (Op4Val < 1 || Op4Val > RegWidth)
6301 return Error(Op4.getStartLoc(),
6302 "expected integer in range [1, 32]");
6303
6304 uint64_t NewOp3Val = 0;
6305 if (RegWidth == 32)
6306 NewOp3Val = (32 - Op3Val) & 0x1f;
6307 else
6308 NewOp3Val = (64 - Op3Val) & 0x3f;
6309
6310 uint64_t NewOp4Val = Op4Val - 1;
6311
6312 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6313 return Error(Op4.getStartLoc(),
6314 "requested insert overflows register");
6315
6316 const MCExpr *NewOp3 =
6317 MCConstantExpr::create(NewOp3Val, getContext());
6318 const MCExpr *NewOp4 =
6319 MCConstantExpr::create(NewOp4Val, getContext());
6320 Operands[3] = AArch64Operand::CreateImm(
6321 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6322 Operands[4] = AArch64Operand::CreateImm(
6323 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6324 if (Tok == "bfi")
6325 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6326 getContext());
6327 else if (Tok == "sbfiz")
6328 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6329 getContext());
6330 else if (Tok == "ubfiz")
6331 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6332 getContext());
6333 else
6334 llvm_unreachable("No valid mnemonic for alias?");
6335 }
6336 }
6337
6338 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6339 // UBFX -> UBFM aliases.
6340 } else if (NumOperands == 5 &&
6341 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6342 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6343 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6344 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6345
6346 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6347 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6348 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6349
6350 if (Op3CE && Op4CE) {
6351 uint64_t Op3Val = Op3CE->getValue();
6352 uint64_t Op4Val = Op4CE->getValue();
6353
6354 uint64_t RegWidth = 0;
6355 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6356 Op1.getReg()))
6357 RegWidth = 64;
6358 else
6359 RegWidth = 32;
6360
6361 if (Op3Val >= RegWidth)
6362 return Error(Op3.getStartLoc(),
6363 "expected integer in range [0, 31]");
6364 if (Op4Val < 1 || Op4Val > RegWidth)
6365 return Error(Op4.getStartLoc(),
6366 "expected integer in range [1, 32]");
6367
6368 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6369
6370 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6371 return Error(Op4.getStartLoc(),
6372 "requested extract overflows register");
6373
6374 const MCExpr *NewOp4 =
6375 MCConstantExpr::create(NewOp4Val, getContext());
6376 Operands[4] = AArch64Operand::CreateImm(
6377 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6378 if (Tok == "bfxil")
6379 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6380 getContext());
6381 else if (Tok == "sbfx")
6382 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6383 getContext());
6384 else if (Tok == "ubfx")
6385 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6386 getContext());
6387 else
6388 llvm_unreachable("No valid mnemonic for alias?");
6389 }
6390 }
6391 }
6392 }
6393
6394 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6395 // instruction for FP registers correctly in some rare circumstances. Convert
6396 // it to a safe instruction and warn (because silently changing someone's
6397 // assembly is rude).
6398 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6399 NumOperands == 4 && Tok == "movi") {
6400 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6401 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6402 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6403 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6404 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6405 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6406 if (Suffix.lower() == ".2d" &&
6407 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6408 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6409 " correctly on this CPU, converting to equivalent movi.16b");
6410 // Switch the suffix to .16b.
6411 unsigned Idx = Op1.isToken() ? 1 : 2;
6412 Operands[Idx] =
6413 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6414 }
6415 }
6416 }
6417
6418 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6419 // InstAlias can't quite handle this since the reg classes aren't
6420 // subclasses.
6421 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6422 // The source register can be Wn here, but the matcher expects a
6423 // GPR64. Twiddle it here if necessary.
6424 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6425 if (Op.isScalarReg()) {
6426 unsigned Reg = getXRegFromWReg(Op.getReg());
6427 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6428 Op.getStartLoc(), Op.getEndLoc(),
6429 getContext());
6430 }
6431 }
6432 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6433 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6434 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6435 if (Op.isScalarReg() &&
6436 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6437 Op.getReg())) {
6438 // The source register can be Wn here, but the matcher expects a
6439 // GPR64. Twiddle it here if necessary.
6440 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6441 if (Op.isScalarReg()) {
6442 unsigned Reg = getXRegFromWReg(Op.getReg());
6443 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6444 Op.getStartLoc(),
6445 Op.getEndLoc(), getContext());
6446 }
6447 }
6448 }
6449 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6450 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6451 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6452 if (Op.isScalarReg() &&
6453 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6454 Op.getReg())) {
6455 // The source register can be Wn here, but the matcher expects a
6456 // GPR32. Twiddle it here if necessary.
6457 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6458 if (Op.isScalarReg()) {
6459 unsigned Reg = getWRegFromXReg(Op.getReg());
6460 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6461 Op.getStartLoc(),
6462 Op.getEndLoc(), getContext());
6463 }
6464 }
6465 }
6466
6467 MCInst Inst;
6468 FeatureBitset MissingFeatures;
6469 // First try to match against the secondary set of tables containing the
6470 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6471 unsigned MatchResult =
6472 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6473 MatchingInlineAsm, 1);
6474
6475 // If that fails, try against the alternate table containing long-form NEON:
6476 // "fadd v0.2s, v1.2s, v2.2s"
6477 if (MatchResult != Match_Success) {
6478 // But first, save the short-form match result: we can use it in case the
6479 // long-form match also fails.
6480 auto ShortFormNEONErrorInfo = ErrorInfo;
6481 auto ShortFormNEONMatchResult = MatchResult;
6482 auto ShortFormNEONMissingFeatures = MissingFeatures;
6483
6484 MatchResult =
6485 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6486 MatchingInlineAsm, 0);
6487
6488 // Now, both matches failed, and the long-form match failed on the mnemonic
6489 // suffix token operand. The short-form match failure is probably more
6490 // relevant: use it instead.
6491 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6492 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6493 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6494 MatchResult = ShortFormNEONMatchResult;
6495 ErrorInfo = ShortFormNEONErrorInfo;
6496 MissingFeatures = ShortFormNEONMissingFeatures;
6497 }
6498 }
6499
6500 switch (MatchResult) {
6501 case Match_Success: {
6502 // Perform range checking and other semantic validations
6503 SmallVector<SMLoc, 8> OperandLocs;
6504 NumOperands = Operands.size();
6505 for (unsigned i = 1; i < NumOperands; ++i)
6506 OperandLocs.push_back(Operands[i]->getStartLoc());
6507 if (validateInstruction(Inst, IDLoc, OperandLocs))
6508 return true;
6509
6510 Inst.setLoc(IDLoc);
6511 Out.emitInstruction(Inst, getSTI());
6512 return false;
6513 }
6514 case Match_MissingFeature: {
6515 assert(MissingFeatures.any() && "Unknown missing feature!");
6516 // Special case the error message for the very common case where only
6517 // a single subtarget feature is missing (neon, e.g.).
6518 std::string Msg = "instruction requires:";
6519 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6520 if (MissingFeatures[i]) {
6521 Msg += " ";
6523 }
6524 }
6525 return Error(IDLoc, Msg);
6526 }
6527 case Match_MnemonicFail:
6528 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6529 case Match_InvalidOperand: {
6530 SMLoc ErrorLoc = IDLoc;
6531
6532 if (ErrorInfo != ~0ULL) {
6533 if (ErrorInfo >= Operands.size())
6534 return Error(IDLoc, "too few operands for instruction",
6535 SMRange(IDLoc, getTok().getLoc()));
6536
6537 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6538 if (ErrorLoc == SMLoc())
6539 ErrorLoc = IDLoc;
6540 }
6541 // If the match failed on a suffix token operand, tweak the diagnostic
6542 // accordingly.
6543 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6544 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6545 MatchResult = Match_InvalidSuffix;
6546
6547 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6548 }
6549 case Match_InvalidTiedOperand:
6550 case Match_InvalidMemoryIndexed1:
6551 case Match_InvalidMemoryIndexed2:
6552 case Match_InvalidMemoryIndexed4:
6553 case Match_InvalidMemoryIndexed8:
6554 case Match_InvalidMemoryIndexed16:
6555 case Match_InvalidCondCode:
6556 case Match_AddSubLSLImm3ShiftLarge:
6557 case Match_AddSubRegExtendSmall:
6558 case Match_AddSubRegExtendLarge:
6559 case Match_AddSubSecondSource:
6560 case Match_LogicalSecondSource:
6561 case Match_AddSubRegShift32:
6562 case Match_AddSubRegShift64:
6563 case Match_InvalidMovImm32Shift:
6564 case Match_InvalidMovImm64Shift:
6565 case Match_InvalidFPImm:
6566 case Match_InvalidMemoryWExtend8:
6567 case Match_InvalidMemoryWExtend16:
6568 case Match_InvalidMemoryWExtend32:
6569 case Match_InvalidMemoryWExtend64:
6570 case Match_InvalidMemoryWExtend128:
6571 case Match_InvalidMemoryXExtend8:
6572 case Match_InvalidMemoryXExtend16:
6573 case Match_InvalidMemoryXExtend32:
6574 case Match_InvalidMemoryXExtend64:
6575 case Match_InvalidMemoryXExtend128:
6576 case Match_InvalidMemoryIndexed1SImm4:
6577 case Match_InvalidMemoryIndexed2SImm4:
6578 case Match_InvalidMemoryIndexed3SImm4:
6579 case Match_InvalidMemoryIndexed4SImm4:
6580 case Match_InvalidMemoryIndexed1SImm6:
6581 case Match_InvalidMemoryIndexed16SImm4:
6582 case Match_InvalidMemoryIndexed32SImm4:
6583 case Match_InvalidMemoryIndexed4SImm7:
6584 case Match_InvalidMemoryIndexed8SImm7:
6585 case Match_InvalidMemoryIndexed16SImm7:
6586 case Match_InvalidMemoryIndexed8UImm5:
6587 case Match_InvalidMemoryIndexed8UImm3:
6588 case Match_InvalidMemoryIndexed4UImm5:
6589 case Match_InvalidMemoryIndexed2UImm5:
6590 case Match_InvalidMemoryIndexed1UImm6:
6591 case Match_InvalidMemoryIndexed2UImm6:
6592 case Match_InvalidMemoryIndexed4UImm6:
6593 case Match_InvalidMemoryIndexed8UImm6:
6594 case Match_InvalidMemoryIndexed16UImm6:
6595 case Match_InvalidMemoryIndexedSImm6:
6596 case Match_InvalidMemoryIndexedSImm5:
6597 case Match_InvalidMemoryIndexedSImm8:
6598 case Match_InvalidMemoryIndexedSImm9:
6599 case Match_InvalidMemoryIndexed16SImm9:
6600 case Match_InvalidMemoryIndexed8SImm10:
6601 case Match_InvalidImm0_0:
6602 case Match_InvalidImm0_1:
6603 case Match_InvalidImm0_3:
6604 case Match_InvalidImm0_7:
6605 case Match_InvalidImm0_15:
6606 case Match_InvalidImm0_31:
6607 case Match_InvalidImm0_63:
6608 case Match_InvalidImm0_127:
6609 case Match_InvalidImm0_255:
6610 case Match_InvalidImm0_65535:
6611 case Match_InvalidImm1_8:
6612 case Match_InvalidImm1_16:
6613 case Match_InvalidImm1_32:
6614 case Match_InvalidImm1_64:
6615 case Match_InvalidMemoryIndexedRange2UImm0:
6616 case Match_InvalidMemoryIndexedRange2UImm1:
6617 case Match_InvalidMemoryIndexedRange2UImm2:
6618 case Match_InvalidMemoryIndexedRange2UImm3:
6619 case Match_InvalidMemoryIndexedRange4UImm0:
6620 case Match_InvalidMemoryIndexedRange4UImm1:
6621 case Match_InvalidMemoryIndexedRange4UImm2:
6622 case Match_InvalidSVEAddSubImm8:
6623 case Match_InvalidSVEAddSubImm16:
6624 case Match_InvalidSVEAddSubImm32:
6625 case Match_InvalidSVEAddSubImm64:
6626 case Match_InvalidSVECpyImm8:
6627 case Match_InvalidSVECpyImm16:
6628 case Match_InvalidSVECpyImm32:
6629 case Match_InvalidSVECpyImm64:
6630 case Match_InvalidIndexRange0_0:
6631 case Match_InvalidIndexRange1_1:
6632 case Match_InvalidIndexRange0_15:
6633 case Match_InvalidIndexRange0_7:
6634 case Match_InvalidIndexRange0_3:
6635 case Match_InvalidIndexRange0_1:
6636 case Match_InvalidSVEIndexRange0_63:
6637 case Match_InvalidSVEIndexRange0_31:
6638 case Match_InvalidSVEIndexRange0_15:
6639 case Match_InvalidSVEIndexRange0_7:
6640 case Match_InvalidSVEIndexRange0_3:
6641 case Match_InvalidLabel:
6642 case Match_InvalidComplexRotationEven:
6643 case Match_InvalidComplexRotationOdd:
6644 case Match_InvalidGPR64shifted8:
6645 case Match_InvalidGPR64shifted16:
6646 case Match_InvalidGPR64shifted32:
6647 case Match_InvalidGPR64shifted64:
6648 case Match_InvalidGPR64shifted128:
6649 case Match_InvalidGPR64NoXZRshifted8:
6650 case Match_InvalidGPR64NoXZRshifted16:
6651 case Match_InvalidGPR64NoXZRshifted32:
6652 case Match_InvalidGPR64NoXZRshifted64:
6653 case Match_InvalidGPR64NoXZRshifted128:
6654 case Match_InvalidZPR32UXTW8:
6655 case Match_InvalidZPR32UXTW16:
6656 case Match_InvalidZPR32UXTW32:
6657 case Match_InvalidZPR32UXTW64:
6658 case Match_InvalidZPR32SXTW8:
6659 case Match_InvalidZPR32SXTW16:
6660 case Match_InvalidZPR32SXTW32:
6661 case Match_InvalidZPR32SXTW64:
6662 case Match_InvalidZPR64UXTW8:
6663 case Match_InvalidZPR64SXTW8:
6664 case Match_InvalidZPR64UXTW16:
6665 case Match_InvalidZPR64SXTW16:
6666 case Match_InvalidZPR64UXTW32:
6667 case Match_InvalidZPR64SXTW32:
6668 case Match_InvalidZPR64UXTW64:
6669 case Match_InvalidZPR64SXTW64:
6670 case Match_InvalidZPR32LSL8:
6671 case Match_InvalidZPR32LSL16:
6672 case Match_InvalidZPR32LSL32:
6673 case Match_InvalidZPR32LSL64:
6674 case Match_InvalidZPR64LSL8:
6675 case Match_InvalidZPR64LSL16:
6676 case Match_InvalidZPR64LSL32:
6677 case Match_InvalidZPR64LSL64:
6678 case Match_InvalidZPR0:
6679 case Match_InvalidZPR8:
6680 case Match_InvalidZPR16:
6681 case Match_InvalidZPR32:
6682 case Match_InvalidZPR64:
6683 case Match_InvalidZPR128:
6684 case Match_InvalidZPR_3b8:
6685 case Match_InvalidZPR_3b16:
6686 case Match_InvalidZPR_3b32:
6687 case Match_InvalidZPR_4b8:
6688 case Match_InvalidZPR_4b16:
6689 case Match_InvalidZPR_4b32:
6690 case Match_InvalidZPR_4b64:
6691 case Match_InvalidSVEPPRorPNRAnyReg:
6692 case Match_InvalidSVEPPRorPNRBReg:
6693 case Match_InvalidSVEPredicateAnyReg:
6694 case Match_InvalidSVEPattern:
6695 case Match_InvalidSVEVecLenSpecifier:
6696 case Match_InvalidSVEPredicateBReg:
6697 case Match_InvalidSVEPredicateHReg:
6698 case Match_InvalidSVEPredicateSReg:
6699 case Match_InvalidSVEPredicateDReg:
6700 case Match_InvalidSVEPredicate3bAnyReg:
6701 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6702 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6703 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6704 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6705 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6706 case Match_InvalidSVEPNPredicateBReg:
6707 case Match_InvalidSVEPNPredicateHReg:
6708 case Match_InvalidSVEPNPredicateSReg:
6709 case Match_InvalidSVEPNPredicateDReg:
6710 case Match_InvalidSVEPredicateListMul2x8:
6711 case Match_InvalidSVEPredicateListMul2x16:
6712 case Match_InvalidSVEPredicateListMul2x32:
6713 case Match_InvalidSVEPredicateListMul2x64:
6714 case Match_InvalidSVEExactFPImmOperandHalfOne:
6715 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6716 case Match_InvalidSVEExactFPImmOperandZeroOne:
6717 case Match_InvalidMatrixTile32:
6718 case Match_InvalidMatrixTile64:
6719 case Match_InvalidMatrix:
6720 case Match_InvalidMatrix8:
6721 case Match_InvalidMatrix16:
6722 case Match_InvalidMatrix32:
6723 case Match_InvalidMatrix64:
6724 case Match_InvalidMatrixTileVectorH8:
6725 case Match_InvalidMatrixTileVectorH16:
6726 case Match_InvalidMatrixTileVectorH32:
6727 case Match_InvalidMatrixTileVectorH64:
6728 case Match_InvalidMatrixTileVectorH128:
6729 case Match_InvalidMatrixTileVectorV8:
6730 case Match_InvalidMatrixTileVectorV16:
6731 case Match_InvalidMatrixTileVectorV32:
6732 case Match_InvalidMatrixTileVectorV64:
6733 case Match_InvalidMatrixTileVectorV128:
6734 case Match_InvalidSVCR:
6735 case Match_InvalidMatrixIndexGPR32_12_15:
6736 case Match_InvalidMatrixIndexGPR32_8_11:
6737 case Match_InvalidLookupTable:
6738 case Match_InvalidSVEVectorListMul2x8:
6739 case Match_InvalidSVEVectorListMul2x16:
6740 case Match_InvalidSVEVectorListMul2x32:
6741 case Match_InvalidSVEVectorListMul2x64:
6742 case Match_InvalidSVEVectorListMul4x8:
6743 case Match_InvalidSVEVectorListMul4x16:
6744 case Match_InvalidSVEVectorListMul4x32:
6745 case Match_InvalidSVEVectorListMul4x64:
6746 case Match_InvalidSVEVectorListStrided2x8:
6747 case Match_InvalidSVEVectorListStrided2x16:
6748 case Match_InvalidSVEVectorListStrided2x32:
6749 case Match_InvalidSVEVectorListStrided2x64:
6750 case Match_InvalidSVEVectorListStrided4x8:
6751 case Match_InvalidSVEVectorListStrided4x16:
6752 case Match_InvalidSVEVectorListStrided4x32:
6753 case Match_InvalidSVEVectorListStrided4x64:
6754 case Match_MSR:
6755 case Match_MRS: {
6756 if (ErrorInfo >= Operands.size())
6757 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6758 // Any time we get here, there's nothing fancy to do. Just get the
6759 // operand SMLoc and display the diagnostic.
6760 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6761 if (ErrorLoc == SMLoc())
6762 ErrorLoc = IDLoc;
6763 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6764 }
6765 }
6766
6767 llvm_unreachable("Implement any new match types added!");
6768}
6769
/// ParseDirective parses the AArch64-specific assembler directives.
/// Target-independent AArch64 directives are matched first; .loh is only
/// accepted for Mach-O output and the .seh_* family only for COFF output.
/// Returns false when the directive was recognized and handled here (the
/// individual handlers report their own diagnostics), and true to hand the
/// directive back to the generic parser.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive names are compared case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Mach-O only: linker-optimization-hint directives.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF only: Windows ARM64 structured-exception-handling unwind
    // directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The save_any_reg variants encode (paired, writeback) as two flags.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else
      return true;
  } else
    return true;
  return false;
}
6872
6873static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6874 SmallVector<StringRef, 4> &RequestedExtensions) {
6875 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6876 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6877
6878 if (!NoCrypto && Crypto) {
6879 // Map 'generic' (and others) to sha2 and aes, because
6880 // that was the traditional meaning of crypto.
6881 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6882 ArchInfo == AArch64::ARMV8_3A) {
6883 RequestedExtensions.push_back("sha2");
6884 RequestedExtensions.push_back("aes");
6885 }
6886 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6887 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6888 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6889 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6890 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6891 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6892 RequestedExtensions.push_back("sm4");
6893 RequestedExtensions.push_back("sha3");
6894 RequestedExtensions.push_back("sha2");
6895 RequestedExtensions.push_back("aes");
6896 }
6897 } else if (NoCrypto) {
6898 // Map 'generic' (and others) to sha2 and aes, because
6899 // that was the traditional meaning of crypto.
6900 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6901 ArchInfo == AArch64::ARMV8_3A) {
6902 RequestedExtensions.push_back("nosha2");
6903 RequestedExtensions.push_back("noaes");
6904 }
6905 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6906 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6907 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6908 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6909 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6910 ArchInfo == AArch64::ARMV9_4A) {
6911 RequestedExtensions.push_back("nosm4");
6912 RequestedExtensions.push_back("nosha3");
6913 RequestedExtensions.push_back("nosha2");
6914 RequestedExtensions.push_back("noaes");
6915 }
6916 }
6917}
6918
6919/// parseDirectiveArch
6920/// ::= .arch token
6921bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6922 SMLoc ArchLoc = getLoc();
6923
6924 StringRef Arch, ExtensionString;
6925 std::tie(Arch, ExtensionString) =
6926 getParser().parseStringToEndOfStatement().trim().split('+');
6927
6928 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
6929 if (!ArchInfo)
6930 return Error(ArchLoc, "unknown arch name");
6931
6932 if (parseToken(AsmToken::EndOfStatement))
6933 return true;
6934
6935 // Get the architecture and extension features.
6936 std::vector<StringRef> AArch64Features;
6937 AArch64Features.push_back(ArchInfo->ArchFeature);
6938 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
6939
6940 MCSubtargetInfo &STI = copySTI();
6941 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6942 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6943 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6944
6945 SmallVector<StringRef, 4> RequestedExtensions;
6946 if (!ExtensionString.empty())
6947 ExtensionString.split(RequestedExtensions, '+');
6948
6949 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
6950
6951 FeatureBitset Features = STI.getFeatureBits();
6952 setAvailableFeatures(ComputeAvailableFeatures(Features));
6953 for (auto Name : RequestedExtensions) {
6954 bool EnableFeature = !Name.consume_front_insensitive("no");
6955
6956 for (const auto &Extension : ExtensionMap) {
6957 if (Extension.Name != Name)
6958 continue;
6959
6960 if (Extension.Features.none())
6961 report_fatal_error("unsupported architectural extension: " + Name);
6962
6963 FeatureBitset ToggleFeatures =
6964 EnableFeature
6966 : STI.ToggleFeature(Features & Extension.Features);
6967 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6968 break;
6969 }
6970 }
6971 return false;
6972}
6973
6974/// parseDirectiveArchExtension
6975/// ::= .arch_extension [no]feature
6976bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6977 SMLoc ExtLoc = getLoc();
6978
6979 StringRef Name = getParser().parseStringToEndOfStatement().trim();
6980
6981 if (parseEOL())
6982 return true;
6983
6984 bool EnableFeature = true;
6985 if (Name.starts_with_insensitive("no")) {
6986 EnableFeature = false;
6987 Name = Name.substr(2);
6988 }
6989
6990 MCSubtargetInfo &STI = copySTI();
6991 FeatureBitset Features = STI.getFeatureBits();
6992 for (const auto &Extension : ExtensionMap) {
6993 if (Extension.Name != Name)
6994 continue;
6995
6996 if (Extension.Features.none())
6997 return Error(ExtLoc, "unsupported architectural extension: " + Name);
6998
6999 FeatureBitset ToggleFeatures =
7000 EnableFeature
7002 : STI.ToggleFeature(Features & Extension.Features);
7003 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7004 return false;
7005 }
7006
7007 return Error(ExtLoc, "unknown architectural extension: " + Name);
7008}
7009
7011 return SMLoc::getFromPointer(L.getPointer() + Offset);
7012}
7013
7014/// parseDirectiveCPU
7015/// ::= .cpu id
7016bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7017 SMLoc CurLoc = getLoc();
7018
7019 StringRef CPU, ExtensionString;
7020 std::tie(CPU, ExtensionString) =
7021 getParser().parseStringToEndOfStatement().trim().split('+');
7022
7023 if (parseToken(AsmToken::EndOfStatement))
7024 return true;
7025
7026 SmallVector<StringRef, 4> RequestedExtensions;
7027 if (!ExtensionString.empty())
7028 ExtensionString.split(RequestedExtensions, '+');
7029
7031 if (!CpuArch) {
7032 Error(CurLoc, "unknown CPU name");
7033 return false;
7034 }
7035 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7036
7037 MCSubtargetInfo &STI = copySTI();
7038 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7039 CurLoc = incrementLoc(CurLoc, CPU.size());
7040
7041 for (auto Name : RequestedExtensions) {
7042 // Advance source location past '+'.
7043 CurLoc = incrementLoc(CurLoc, 1);
7044
7045 bool EnableFeature = !Name.consume_front_insensitive("no");
7046
7047 bool FoundExtension = false;
7048 for (const auto &Extension : ExtensionMap) {
7049 if (Extension.Name != Name)
7050 continue;
7051
7052 if (Extension.Features.none())
7053 report_fatal_error("unsupported architectural extension: " + Name);
7054
7055 FeatureBitset Features = STI.getFeatureBits();
7056 FeatureBitset ToggleFeatures =
7057 EnableFeature
7059 : STI.ToggleFeature(Features & Extension.Features);
7060 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7061 FoundExtension = true;
7062
7063 break;
7064 }
7065
7066 if (!FoundExtension)
7067 Error(CurLoc, "unsupported architectural extension");
7068
7069 CurLoc = incrementLoc(CurLoc, Name.size());
7070 }
7071 return false;
7072}
7073
7074/// parseDirectiveInst
7075/// ::= .inst opcode [, ...]
7076bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7077 if (getLexer().is(AsmToken::EndOfStatement))
7078 return Error(Loc, "expected expression following '.inst' directive");
7079
7080 auto parseOp = [&]() -> bool {
7081 SMLoc L = getLoc();
7082 const MCExpr *Expr = nullptr;
7083 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7084 return true;
7085 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7086 if (check(!Value, L, "expected constant expression"))
7087 return true;
7088 getTargetStreamer().emitInst(Value->getValue());
7089 return false;
7090 };
7091
7092 return parseMany(parseOp);
7093}
7094
7095// parseDirectiveTLSDescCall:
7096// ::= .tlsdesccall symbol
7097bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7099 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7100 parseToken(AsmToken::EndOfStatement))
7101 return true;
7102
7103 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7104 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7105 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7106
7107 MCInst Inst;
7108 Inst.setOpcode(AArch64::TLSDESCCALL);
7110
7111 getParser().getStreamer().emitInstruction(Inst, getSTI());
7112 return false;
7113}
7114
7115/// ::= .loh <lohName | lohId> label1, ..., labelN
7116/// The number of arguments depends on the loh identifier.
7117bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7119 if (getTok().isNot(AsmToken::Identifier)) {
7120 if (getTok().isNot(AsmToken::Integer))
7121 return TokError("expected an identifier or a number in directive");
7122 // We successfully get a numeric value for the identifier.
7123 // Check if it is valid.
7124 int64_t Id = getTok().getIntVal();
7125 if (Id <= -1U && !isValidMCLOHType(Id))
7126 return TokError("invalid numeric identifier in directive");
7127 Kind = (MCLOHType)Id;
7128 } else {
7129 StringRef Name = getTok().getIdentifier();
7130 // We successfully parse an identifier.
7131 // Check if it is a recognized one.
7132 int Id = MCLOHNameToId(Name);
7133
7134 if (Id == -1)
7135 return TokError("invalid identifier in directive");
7136 Kind = (MCLOHType)Id;
7137 }
7138 // Consume the identifier.
7139 Lex();
7140 // Get the number of arguments of this LOH.
7141 int NbArgs = MCLOHIdToNbArgs(Kind);
7142
7143 assert(NbArgs != -1 && "Invalid number of arguments");
7144
7146 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7148 if (getParser().parseIdentifier(Name))
7149 return TokError("expected identifier in directive");
7150 Args.push_back(getContext().getOrCreateSymbol(Name));
7151
7152 if (Idx + 1 == NbArgs)
7153 break;
7154 if (parseComma())
7155 return true;
7156 }
7157 if (parseEOL())
7158 return true;
7159
7160 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7161 return false;
7162}
7163
7164/// parseDirectiveLtorg
7165/// ::= .ltorg | .pool
7166bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7167 if (parseEOL())
7168 return true;
7169 getTargetStreamer().emitCurrentConstantPool();
7170 return false;
7171}
7172
7173/// parseDirectiveReq
7174/// ::= name .req registername
7175bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7176 Lex(); // Eat the '.req' token.
7177 SMLoc SRegLoc = getLoc();
7178 RegKind RegisterKind = RegKind::Scalar;
7179 MCRegister RegNum;
7180 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7181
7182 if (!ParseRes.isSuccess()) {
7184 RegisterKind = RegKind::NeonVector;
7185 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7186
7187 if (ParseRes.isFailure())
7188 return true;
7189
7190 if (ParseRes.isSuccess() && !Kind.empty())
7191 return Error(SRegLoc, "vector register without type specifier expected");
7192 }
7193
7194 if (!ParseRes.isSuccess()) {
7196 RegisterKind = RegKind::SVEDataVector;
7197 ParseRes =
7198 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7199
7200 if (ParseRes.isFailure())
7201 return true;
7202
7203 if (ParseRes.isSuccess() && !Kind.empty())
7204 return Error(SRegLoc,
7205 "sve vector register without type specifier expected");
7206 }
7207
7208 if (!ParseRes.isSuccess()) {
7210 RegisterKind = RegKind::SVEPredicateVector;
7211 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7212
7213 if (ParseRes.isFailure())
7214 return true;
7215
7216 if (ParseRes.isSuccess() && !Kind.empty())
7217 return Error(SRegLoc,
7218 "sve predicate register without type specifier expected");
7219 }
7220
7221 if (!ParseRes.isSuccess())
7222 return Error(SRegLoc, "register name or alias expected");
7223
7224 // Shouldn't be anything else.
7225 if (parseEOL())
7226 return true;
7227
7228 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7229 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7230 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7231
7232 return false;
7233}
7234
7235/// parseDirectiveUneq
7236/// ::= .unreq registername
7237bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7238 if (getTok().isNot(AsmToken::Identifier))
7239 return TokError("unexpected input in .unreq directive.");
7240 RegisterReqs.erase(getTok().getIdentifier().lower());
7241 Lex(); // Eat the identifier.
7242 return parseToken(AsmToken::EndOfStatement);
7243}
7244
7245bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7246 if (parseEOL())
7247 return true;
7248 getStreamer().emitCFINegateRAState();
7249 return false;
7250}
7251
7252/// parseDirectiveCFIBKeyFrame
7253/// ::= .cfi_b_key
7254bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7255 if (parseEOL())
7256 return true;
7257 getStreamer().emitCFIBKeyFrame();
7258 return false;
7259}
7260
7261/// parseDirectiveCFIMTETaggedFrame
7262/// ::= .cfi_mte_tagged_frame
7263bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7264 if (parseEOL())
7265 return true;
7266 getStreamer().emitCFIMTETaggedFrame();
7267 return false;
7268}
7269
7270/// parseDirectiveVariantPCS
7271/// ::= .variant_pcs symbolname
7272bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7274 if (getParser().parseIdentifier(Name))
7275 return TokError("expected symbol name");
7276 if (parseEOL())
7277 return true;
7278 getTargetStreamer().emitDirectiveVariantPCS(
7279 getContext().getOrCreateSymbol(Name));
7280 return false;
7281}
7282
7283/// parseDirectiveSEHAllocStack
7284/// ::= .seh_stackalloc
7285bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7286 int64_t Size;
7287 if (parseImmExpr(Size))
7288 return true;
7289 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7290 return false;
7291}
7292
7293/// parseDirectiveSEHPrologEnd
7294/// ::= .seh_endprologue
7295bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7296 getTargetStreamer().emitARM64WinCFIPrologEnd();
7297 return false;
7298}
7299
7300/// parseDirectiveSEHSaveR19R20X
7301/// ::= .seh_save_r19r20_x
7302bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7303 int64_t Offset;
7304 if (parseImmExpr(Offset))
7305 return true;
7306 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7307 return false;
7308}
7309
7310/// parseDirectiveSEHSaveFPLR
7311/// ::= .seh_save_fplr
7312bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7313 int64_t Offset;
7314 if (parseImmExpr(Offset))
7315 return true;
7316 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7317 return false;
7318}
7319
7320/// parseDirectiveSEHSaveFPLRX
7321/// ::= .seh_save_fplr_x
7322bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7323 int64_t Offset;
7324 if (parseImmExpr(Offset))
7325 return true;
7326 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7327 return false;
7328}
7329
7330/// parseDirectiveSEHSaveReg
7331/// ::= .seh_save_reg
7332bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7333 unsigned Reg;
7334 int64_t Offset;
7335 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7336 parseComma() || parseImmExpr(Offset))
7337 return true;
7338 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7339 return false;
7340}
7341
7342/// parseDirectiveSEHSaveRegX
7343/// ::= .seh_save_reg_x
7344bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7345 unsigned Reg;
7346 int64_t Offset;
7347 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7348 parseComma() || parseImmExpr(Offset))
7349 return true;
7350 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7351 return false;
7352}
7353
7354/// parseDirectiveSEHSaveRegP
7355/// ::= .seh_save_regp
7356bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7357 unsigned Reg;
7358 int64_t Offset;
7359 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7360 parseComma() || parseImmExpr(Offset))
7361 return true;
7362 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7363 return false;
7364}
7365
7366/// parseDirectiveSEHSaveRegPX
7367/// ::= .seh_save_regp_x
7368bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7369 unsigned Reg;
7370 int64_t Offset;
7371 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7372 parseComma() || parseImmExpr(Offset))
7373 return true;
7374 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7375 return false;
7376}
7377
7378/// parseDirectiveSEHSaveLRPair
7379/// ::= .seh_save_lrpair
7380bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7381 unsigned Reg;
7382 int64_t Offset;
7383 L = getLoc();
7384 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7385 parseComma() || parseImmExpr(Offset))
7386 return true;
7387 if (check(((Reg - 19) % 2 != 0), L,
7388 "expected register with even offset from x19"))
7389 return true;
7390 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7391 return false;
7392}
7393
7394/// parseDirectiveSEHSaveFReg
7395/// ::= .seh_save_freg
7396bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7397 unsigned Reg;
7398 int64_t Offset;
7399 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7400 parseComma() || parseImmExpr(Offset))
7401 return true;
7402 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7403 return false;
7404}
7405
7406/// parseDirectiveSEHSaveFRegX
7407/// ::= .seh_save_freg_x
7408bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7409 unsigned Reg;
7410 int64_t Offset;
7411 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7412 parseComma() || parseImmExpr(Offset))
7413 return true;
7414 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7415 return false;
7416}
7417
7418/// parseDirectiveSEHSaveFRegP
7419/// ::= .seh_save_fregp
7420bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7421 unsigned Reg;
7422 int64_t Offset;
7423 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7424 parseComma() || parseImmExpr(Offset))
7425 return true;
7426 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7427 return false;
7428}
7429
7430/// parseDirectiveSEHSaveFRegPX
7431/// ::= .seh_save_fregp_x
7432bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7433 unsigned Reg;
7434 int64_t Offset;
7435 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7436 parseComma() || parseImmExpr(Offset))
7437 return true;
7438 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7439 return false;
7440}
7441
7442/// parseDirectiveSEHSetFP
7443/// ::= .seh_set_fp
7444bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7445 getTargetStreamer().emitARM64WinCFISetFP();
7446 return false;
7447}
7448
7449/// parseDirectiveSEHAddFP
7450/// ::= .seh_add_fp
7451bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7452 int64_t Size;
7453 if (parseImmExpr(Size))
7454 return true;
7455 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7456 return false;
7457}
7458
7459/// parseDirectiveSEHNop
7460/// ::= .seh_nop
7461bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7462 getTargetStreamer().emitARM64WinCFINop();
7463 return false;
7464}
7465
7466/// parseDirectiveSEHSaveNext
7467/// ::= .seh_save_next
7468bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7469 getTargetStreamer().emitARM64WinCFISaveNext();
7470 return false;
7471}
7472
7473/// parseDirectiveSEHEpilogStart
7474/// ::= .seh_startepilogue
7475bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7476 getTargetStreamer().emitARM64WinCFIEpilogStart();
7477 return false;
7478}
7479
7480/// parseDirectiveSEHEpilogEnd
7481/// ::= .seh_endepilogue
7482bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7483 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7484 return false;
7485}
7486
7487/// parseDirectiveSEHTrapFrame
7488/// ::= .seh_trap_frame
7489bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7490 getTargetStreamer().emitARM64WinCFITrapFrame();
7491 return false;
7492}
7493
7494/// parseDirectiveSEHMachineFrame
7495/// ::= .seh_pushframe
7496bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7497 getTargetStreamer().emitARM64WinCFIMachineFrame();
7498 return false;
7499}
7500
7501/// parseDirectiveSEHContext
7502/// ::= .seh_context
7503bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7504 getTargetStreamer().emitARM64WinCFIContext();
7505 return false;
7506}
7507
7508/// parseDirectiveSEHECContext
7509/// ::= .seh_ec_context
7510bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7511 getTargetStreamer().emitARM64WinCFIECContext();
7512 return false;
7513}
7514
7515/// parseDirectiveSEHClearUnwoundToCall
7516/// ::= .seh_clear_unwound_to_call
7517bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7518 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7519 return false;
7520}
7521
7522/// parseDirectiveSEHPACSignLR
7523/// ::= .seh_pac_sign_lr
7524bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7525 getTargetStreamer().emitARM64WinCFIPACSignLR();
7526 return false;
7527}
7528
7529/// parseDirectiveSEHSaveAnyReg
7530/// ::= .seh_save_any_reg
7531/// ::= .seh_save_any_reg_p
7532/// ::= .seh_save_any_reg_x
7533/// ::= .seh_save_any_reg_px
7534bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7535 bool Writeback) {
7537 SMLoc Start, End;
7538 int64_t Offset;
7539 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7540 parseComma() || parseImmExpr(Offset))
7541 return true;
7542
7543 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7544 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7545 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7546 return Error(L, "invalid save_any_reg offset");
7547 unsigned EncodedReg;
7548 if (Reg == AArch64::FP)
7549 EncodedReg = 29;
7550 else if (Reg == AArch64::LR)
7551 EncodedReg = 30;
7552 else
7553 EncodedReg = Reg - AArch64::X0;
7554 if (Paired) {
7555 if (Reg == AArch64::LR)
7556 return Error(Start, "lr cannot be paired with another register");
7557 if (Writeback)
7558 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7559 else
7560 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7561 } else {
7562 if (Writeback)
7563 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7564 else
7565 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7566 }
7567 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7568 unsigned EncodedReg = Reg - AArch64::D0;
7569 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7570 return Error(L, "invalid save_any_reg offset");
7571 if (Paired) {
7572 if (Reg == AArch64::D31)
7573 return Error(Start, "d31 cannot be paired with another register");
7574 if (Writeback)
7575 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7576 else
7577 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7578 } else {
7579 if (Writeback)
7580 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7581 else
7582 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7583 }
7584 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7585 unsigned EncodedReg = Reg - AArch64::Q0;
7586 if (Offset < 0 || Offset % 16)
7587 return Error(L, "invalid save_any_reg offset");
7588 if (Paired) {
7589 if (Reg == AArch64::Q31)
7590 return Error(Start, "q31 cannot be paired with another register");
7591 if (Writeback)
7592 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7593 else
7594 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7595 } else {
7596 if (Writeback)
7597 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7598 else
7599 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7600 }
7601 } else {
7602 return Error(Start, "save_any_reg register must be x, q or d register");
7603 }
7604 return false;
7605}
7606
7607bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7608 // Try @AUTH expressions: they're more complex than the usual symbol variants.
7609 if (!parseAuthExpr(Res, EndLoc))
7610 return false;
7611 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
7612}
7613
7614/// parseAuthExpr
7615/// ::= _sym@AUTH(ib,123[,addr])
7616/// ::= (_sym + 5)@AUTH(ib,123[,addr])
7617/// ::= (_sym - 5)@AUTH(ib,123[,addr])
7618bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7619 MCAsmParser &Parser = getParser();
7620 MCContext &Ctx = getContext();
7621
7622 AsmToken Tok = Parser.getTok();
7623
7624 // Look for '_sym@AUTH' ...
7625 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
7626 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
7627 if (SymName.contains('@'))
7628 return TokError(
7629 "combination of @AUTH with other modifiers not supported");
7630 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7631
7632 Parser.Lex(); // Eat the identifier.
7633 } else {
7634 // ... or look for a more complex symbol reference, such as ...
7636
7637 // ... '"_long sym"@AUTH' ...
7638 if (Tok.is(AsmToken::String))
7639 Tokens.resize(2);
7640 // ... or '(_sym + 5)@AUTH'.
7641 else if (Tok.is(AsmToken::LParen))
7642 Tokens.resize(6);
7643 else
7644 return true;
7645
7646 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
7647 return true;
7648
7649 // In either case, the expression ends with '@' 'AUTH'.
7650 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
7651 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
7652 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
7653 return true;
7654
7655 if (Tok.is(AsmToken::String)) {
7656 StringRef SymName;
7657 if (Parser.parseIdentifier(SymName))
7658 return true;
7659 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7660 } else {
7661 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
7662 return true;
7663 }
7664
7665 Parser.Lex(); // '@'
7666 Parser.Lex(); // 'AUTH'
7667 }
7668
7669 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
7670 if (parseToken(AsmToken::LParen, "expected '('"))
7671 return true;
7672
7673 if (Parser.getTok().isNot(AsmToken::Identifier))
7674 return TokError("expected key name");
7675
7676 StringRef KeyStr = Parser.getTok().getIdentifier();
7677 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
7678 if (!KeyIDOrNone)
7679 return TokError("invalid key '" + KeyStr + "'");
7680 Parser.Lex();
7681
7682 if (parseToken(AsmToken::Comma, "expected ','"))
7683 return true;
7684
7685 if (Parser.getTok().isNot(AsmToken::Integer))
7686 return TokError("expected integer discriminator");
7687 int64_t Discriminator = Parser.getTok().getIntVal();
7688
7689 if (!isUInt<16>(Discriminator))
7690 return TokError("integer discriminator " + Twine(Discriminator) +
7691 " out of range [0, 0xFFFF]");
7692 Parser.Lex();
7693
7694 bool UseAddressDiversity = false;
7695 if (Parser.getTok().is(AsmToken::Comma)) {
7696 Parser.Lex();
7697 if (Parser.getTok().isNot(AsmToken::Identifier) ||
7698 Parser.getTok().getIdentifier() != "addr")
7699 return TokError("expected 'addr'");
7700 UseAddressDiversity = true;
7701 Parser.Lex();
7702 }
7703
7704 EndLoc = Parser.getTok().getEndLoc();
7705 if (parseToken(AsmToken::RParen, "expected ')'"))
7706 return true;
7707
7708 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
7709 UseAddressDiversity, Ctx);
7710 return false;
7711}
7712
/// Classify a symbol-reference expression, splitting it into an AArch64
/// ELF-style relocation specifier (ELFRefKind), a Darwin-style variant kind
/// (DarwinRefKind), and a constant addend. Returns true if Expr has a
/// recognizable "symbol (+ addend)" shape, false otherwise. On false the
/// output parameters hold their initialized defaults.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an outer AArch64-specific wrapper, recording its kind and
  // continuing classification on the inner sub-expression.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  // Reject non-relocatable expressions and A-B symbol differences.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7754
7755/// Force static initialization.
7762}
7763
7764#define GET_REGISTER_MATCHER
7765#define GET_SUBTARGET_FEATURE_NAME
7766#define GET_MATCHER_IMPLEMENTATION
7767#define GET_MNEMONIC_SPELL_CHECKER
7768#include "AArch64GenAsmMatcher.inc"
7769
7770// Define this matcher function after the auto-generated include so we
7771// have the match class enum definitions.
7772unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
7773 unsigned Kind) {
7774 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
7775
7776 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
7777 if (!Op.isImm())
7778 return Match_InvalidOperand;
7779 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7780 if (!CE)
7781 return Match_InvalidOperand;
7782 if (CE->getValue() == ExpectedVal)
7783 return Match_Success;
7784 return Match_InvalidOperand;
7785 };
7786
7787 switch (Kind) {
7788 default:
7789 return Match_InvalidOperand;
7790 case MCK_MPR:
7791 // If the Kind is a token for the MPR register class which has the "za"
7792 // register (SME accumulator array), check if the asm is a literal "za"
7793 // token. This is for the "smstart za" alias that defines the register
7794 // as a literal token.
7795 if (Op.isTokenEqual("za"))
7796 return Match_Success;
7797 return Match_InvalidOperand;
7798
7799 // If the kind is a token for a literal immediate, check if our asm operand
7800 // matches. This is for InstAliases which have a fixed-value immediate in
7801 // the asm string, such as hints which are parsed into a specific
7802 // instruction definition.
7803#define MATCH_HASH(N) \
7804 case MCK__HASH_##N: \
7805 return MatchesOpImmediate(N);
7806 MATCH_HASH(0)
7807 MATCH_HASH(1)
7808 MATCH_HASH(2)
7809 MATCH_HASH(3)
7810 MATCH_HASH(4)
7811 MATCH_HASH(6)
7812 MATCH_HASH(7)
7813 MATCH_HASH(8)
7814 MATCH_HASH(10)
7815 MATCH_HASH(12)
7816 MATCH_HASH(14)
7817 MATCH_HASH(16)
7818 MATCH_HASH(24)
7819 MATCH_HASH(25)
7820 MATCH_HASH(26)
7821 MATCH_HASH(27)
7822 MATCH_HASH(28)
7823 MATCH_HASH(29)
7824 MATCH_HASH(30)
7825 MATCH_HASH(31)
7826 MATCH_HASH(32)
7827 MATCH_HASH(40)
7828 MATCH_HASH(48)
7829 MATCH_HASH(64)
7830#undef MATCH_HASH
7831#define MATCH_HASH_MINUS(N) \
7832 case MCK__HASH__MINUS_##N: \
7833 return MatchesOpImmediate(-N);
7837#undef MATCH_HASH_MINUS
7838 }
7839}
7840
/// Parse a consecutive even/odd scalar GPR pair operand ("reg, reg"): both
/// registers must be the same width (w or x), the first must have an even
/// hardware encoding, and the second must have the next encoding. Pushes the
/// matching sequential-pair super-register as a single operand.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(FirstReg);
  if (!Res.isSuccess())
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Determine the width of the first register; it fixes the expected width
  // of the second.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's hardware encoding must be even.
  if (FirstEncoding & 0x1)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  if (getTok().isNot(AsmToken::Comma))
    return Error(getLoc(), "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (!Res.isSuccess())
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // The second register must be the next encoding and the same width as the
  // first.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg)))
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // Map the first register to the pair super-register of the matching width.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return ParseStatus::Success;
}
7904
7905template <bool ParseShiftExtend, bool ParseSuffix>
7906ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
7907 const SMLoc S = getLoc();
7908 // Check for a SVE vector register specifier first.
7909 MCRegister RegNum;
7911
7912 ParseStatus Res =
7913 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7914
7915 if (!Res.isSuccess())
7916 return Res;
7917
7918 if (ParseSuffix && Kind.empty())
7919 return ParseStatus::NoMatch;
7920
7921 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
7922 if (!KindRes)
7923 return ParseStatus::NoMatch;
7924
7925 unsigned ElementWidth = KindRes->second;
7926
7927 // No shift/extend is the default.
7928 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
7929 Operands.push_back(AArch64Operand::CreateVectorReg(
7930 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
7931
7932 ParseStatus Res = tryParseVectorIndex(Operands);
7933 if (Res.isFailure())
7934 return ParseStatus::Failure;
7935 return ParseStatus::Success;
7936 }
7937
7938 // Eat the comma
7939 Lex();
7940
7941 // Match the shift
7943 Res = tryParseOptionalShiftExtend(ExtOpnd);
7944 if (!Res.isSuccess())
7945 return Res;
7946
7947 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7948 Operands.push_back(AArch64Operand::CreateVectorReg(
7949 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7950 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7951 Ext->hasShiftExtendAmount()));
7952
7953 return ParseStatus::Success;
7954}
7955
/// Parse an SVE predicate pattern operand, either as an explicit immediate
/// ("#31") or as a named pattern looked up in the SVEPREDPAT table. Pushes
/// the pattern encoding as a constant-immediate operand.
ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  // Only '#imm' or an identifier can start a pattern.
  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return ParseStatus::NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return ParseStatus::Failure;

    // The immediate must fold to a constant.
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("invalid operand for instruction");

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return ParseStatus::Success;
}
7998
8000AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8001 int64_t Pattern;
8002 SMLoc SS = getLoc();
8003 const AsmToken &TokE = getTok();
8004 // Parse the pattern
8005 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8006 TokE.getString());
8007 if (!Pat)
8008 return ParseStatus::NoMatch;
8009
8010 Lex();
8011 Pattern = Pat->Encoding;
8012 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8013
8014 Operands.push_back(
8015 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8016 SS, getLoc(), getContext()));
8017
8018 return ParseStatus::Success;
8019}
8020
/// Parse a GPR64x8 operand: a scalar x-register that names the first element
/// of an 8-register tuple. The register is mapped to its GPR64x8Class
/// super-register via the x8sub_0 subregister index; registers with no such
/// super-register are rejected.
ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
  SMLoc SS = getLoc();

  MCRegister XReg;
  if (!tryParseScalarRegister(XReg).isSuccess())
    return ParseStatus::NoMatch;

  MCContext &ctx = getContext();
  const MCRegisterInfo *RI = ctx.getRegisterInfo();
  // Zero means no matching super-register exists for XReg.
  int X8Reg = RI->getMatchingSuperReg(
      XReg, AArch64::x8sub_0,
      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
  if (!X8Reg)
    return Error(SS,
                 "expected an even-numbered x-register in the range [x0,x22]");

  Operands.push_back(
      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
  return ParseStatus::Success;
}
8041
8042ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8043 SMLoc S = getLoc();
8044
8045 if (getTok().isNot(AsmToken::Integer))
8046 return ParseStatus::NoMatch;
8047
8048 if (getLexer().peekTok().isNot(AsmToken::Colon))
8049 return ParseStatus::NoMatch;
8050
8051 const MCExpr *ImmF;
8052 if (getParser().parseExpression(ImmF))
8053 return ParseStatus::NoMatch;
8054
8055 if (getTok().isNot(AsmToken::Colon))
8056 return ParseStatus::NoMatch;
8057
8058 Lex(); // Eat ':'
8059 if (getTok().isNot(AsmToken::Integer))
8060 return ParseStatus::NoMatch;
8061
8062 SMLoc E = getTok().getLoc();
8063 const MCExpr *ImmL;
8064 if (getParser().parseExpression(ImmL))
8065 return ParseStatus::NoMatch;
8066
8067 unsigned ImmFVal = dyn_cast<MCConstantExpr>(ImmF)->getValue();
8068 unsigned ImmLVal = dyn_cast<MCConstantExpr>(ImmL)->getValue();
8069
8070 Operands.push_back(
8071 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8072 return ParseStatus::Success;
8073}
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(unsigned ZReg, unsigned Reg)
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (#elements, element-width) if Suffix is a valid vector kind.
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Given that RA is a live value
@ Default
Definition: DwarfDebug.cpp:87
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define check(cond)
static LVOptions Options
Definition: LVOptions.cpp:25
Live Register Matrix
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
unsigned Reg
#define T
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:413
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:410
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1513
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Target independent representation for an assembler token.
Definition: MCAsmMacro.h:21
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
int64_t getIntVal() const
Definition: MCAsmMacro.h:115
bool isNot(TokenKind K) const
Definition: MCAsmMacro.h:83
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmMacro.h:110
bool is(TokenKind K) const
Definition: MCAsmMacro.h:82
SMLoc getEndLoc() const
Definition: MCAsmLexer.cpp:30
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition: MCAsmMacro.h:99
This class represents an Operation in the Expression.
Base class for user error types.
Definition: Error.h:352
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Container class for subtarget features.
constexpr size_t size() const
void UnLex(AsmToken const &Token)
Definition: MCAsmLexer.h:93
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:111
virtual size_t peekTokens(MutableArrayRef< AsmToken > Buf, bool ShouldSkipSpace=true)=0
Look ahead an arbitrary number of tokens.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:123
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc, AsmTypeInfo *TypeInfo)=0
Parse a primary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:40
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual MCAsmLexer & getLexer()=0
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
int64_t getValue() const
Definition: MCExpr.h:173
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:194
Context object for machine code objects.
Definition: MCContext.h:81
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:455
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:201
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
bool evaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout, const MCFixup *Fixup) const
Try to evaluate the expression to a relocatable value, i.e.
Definition: MCExpr.cpp:814
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
unsigned getNumOperands() const
Definition: MCInst.h:208
void setLoc(SMLoc loc)
Definition: MCInst.h:203
unsigned getOpcode() const
Definition: MCInst.h:198
void addOperand(const MCOperand Op)
Definition: MCInst.h:210
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:134
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:162
int64_t getImm() const
Definition: MCInst.h:80
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:141
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Streaming machine code generation interface.
Definition: MCStreamer.h:212
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:304
Generic base class for all target subtargets.
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:192
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:397
VariantKind getKind() const
Definition: MCExpr.h:412
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc)
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
void setAvailableFeatures(const FeatureBitset &Value)
const MCSubtargetInfo & getSTI() const
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Definition: MCStreamer.h:93
This represents an "assembler immediate".
Definition: MCValue.h:36
int64_t getConstant() const
Definition: MCValue.h:43
const MCSymbolRefExpr * getSymB() const
Definition: MCValue.h:45
const MCSymbolRefExpr * getSymA() const
Definition: MCValue.h:44
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
Represents a location in source code.
Definition: SMLoc.h:23
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:36
constexpr const char * getPointer() const
Definition: SMLoc.h:34
Represents a range in source code.
Definition: SMLoc.h:48
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition: SmallSet.h:236
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:127
iterator end()
Definition: StringMap.h:220
iterator find(StringRef Key)
Definition: StringMap.h:233
void erase(iterator I)
Definition: StringMap.h:414
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition: StringMap.h:306
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:696
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:466
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:605
std::string upper() const
Convert the given ASCII string to uppercase.
Definition: StringRef.cpp:116
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:420
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition: StringRef.h:585
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:811
std::string lower() const
Definition: StringRef.cpp:111
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:271
static constexpr size_t npos
Definition: StringRef.h:52
StringRef drop_back(size_t N=1) const
Return a StringRef equal to 'this' but with the last N elements dropped.
Definition: StringRef.h:612
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition: StringRef.h:170
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:389
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCode getInvertedCondCode(CondCode Code)
uint32_t parseGenericRegister(StringRef Name)
const SysReg * lookupSysRegByName(StringRef)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr ArchInfo ARMV8_9A
constexpr ArchInfo ARMV8_3A
constexpr ArchInfo ARMV8_7A
constexpr ArchInfo ARMV8R
constexpr ArchInfo ARMV8_4A
constexpr ArchInfo ARMV9_3A
const ArchInfo * parseArch(StringRef Arch)
constexpr ArchInfo ARMV8_6A
constexpr ArchInfo ARMV8_5A
const ArchInfo * getArchForCpu(StringRef CPU)
constexpr ArchInfo ARMV9_1A
constexpr ArchInfo ARMV9A
constexpr ArchInfo ARMV9_2A
constexpr ArchInfo ARMV9_4A
bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr ArchInfo ARMV8_8A
constexpr ArchInfo ARMV8_1A
constexpr ArchInfo ARMV8_2A
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1529
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
@ SS
Definition: X86.h:207
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
Format
The format used for serializing/deserializing remarks.
Definition: RemarkFormat.h:25
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition: Error.h:1071
@ Offset
Definition: DWP.cpp:456
@ Length
Definition: DWP.cpp:456
static int MCLOHNameToId(StringRef Name)
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:158
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
static bool isValidMCLOHType(unsigned Kind)
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
static unsigned getXRegFromWReg(unsigned Reg)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
Target & getTheAArch64_32Target()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
static int MCLOHIdToNbArgs(MCLOHType Kind)
MCLOHType
Linker Optimization Hint Type.
static unsigned getWRegFromXReg(unsigned Reg)
Target & getTheARM64Target()
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
#define N
const FeatureBitset Features
const char * Name
A record for a potential prefetch made during the initial scan of the loop.
AArch64::ExtensionBitset DefaultExts
Description of the encoding of one expression Op.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name
FeatureBitset FeaturesRequired