LLVM 22.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
68enum class RegKind {
69 Scalar,
70 NeonVector,
71 SVEDataVector,
72 SVEPredicateAsCounter,
73 SVEPredicateVector,
74 Matrix,
75 LookupTable
76};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
80enum RegConstraintEqualityTy {
81 EqualsReg,
82 EqualsSuperReg,
83 EqualsSubReg
84};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseDataExpr(const MCExpr *&Res) override;
186 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
187
188 bool parseDirectiveArch(SMLoc L);
189 bool parseDirectiveArchExtension(SMLoc L);
190 bool parseDirectiveCPU(SMLoc L);
191 bool parseDirectiveInst(SMLoc L);
192
193 bool parseDirectiveTLSDescCall(SMLoc L);
194
195 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
196 bool parseDirectiveLtorg(SMLoc L);
197
198 bool parseDirectiveReq(StringRef Name, SMLoc L);
199 bool parseDirectiveUnreq(SMLoc L);
200 bool parseDirectiveCFINegateRAState();
201 bool parseDirectiveCFINegateRAStateWithPC();
202 bool parseDirectiveCFIBKeyFrame();
203 bool parseDirectiveCFIMTETaggedFrame();
204
205 bool parseDirectiveVariantPCS(SMLoc L);
206
207 bool parseDirectiveSEHAllocStack(SMLoc L);
208 bool parseDirectiveSEHPrologEnd(SMLoc L);
209 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
210 bool parseDirectiveSEHSaveFPLR(SMLoc L);
211 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
212 bool parseDirectiveSEHSaveReg(SMLoc L);
213 bool parseDirectiveSEHSaveRegX(SMLoc L);
214 bool parseDirectiveSEHSaveRegP(SMLoc L);
215 bool parseDirectiveSEHSaveRegPX(SMLoc L);
216 bool parseDirectiveSEHSaveLRPair(SMLoc L);
217 bool parseDirectiveSEHSaveFReg(SMLoc L);
218 bool parseDirectiveSEHSaveFRegX(SMLoc L);
219 bool parseDirectiveSEHSaveFRegP(SMLoc L);
220 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
221 bool parseDirectiveSEHSetFP(SMLoc L);
222 bool parseDirectiveSEHAddFP(SMLoc L);
223 bool parseDirectiveSEHNop(SMLoc L);
224 bool parseDirectiveSEHSaveNext(SMLoc L);
225 bool parseDirectiveSEHEpilogStart(SMLoc L);
226 bool parseDirectiveSEHEpilogEnd(SMLoc L);
227 bool parseDirectiveSEHTrapFrame(SMLoc L);
228 bool parseDirectiveSEHMachineFrame(SMLoc L);
229 bool parseDirectiveSEHContext(SMLoc L);
230 bool parseDirectiveSEHECContext(SMLoc L);
231 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
232 bool parseDirectiveSEHPACSignLR(SMLoc L);
233 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
234 bool parseDirectiveSEHAllocZ(SMLoc L);
235 bool parseDirectiveSEHSaveZReg(SMLoc L);
236 bool parseDirectiveSEHSavePReg(SMLoc L);
237 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
238 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
239
240 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
241 SmallVectorImpl<SMLoc> &Loc);
242 unsigned getNumRegsForRegKind(RegKind K);
243 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
244 OperandVector &Operands, MCStreamer &Out,
245 uint64_t &ErrorInfo,
246 bool MatchingInlineAsm) override;
247 /// @name Auto-generated Match Functions
248 /// {
249
250#define GET_ASSEMBLER_HEADER
251#include "AArch64GenAsmMatcher.inc"
252
253 /// }
254
255 ParseStatus tryParseScalarRegister(MCRegister &Reg);
256 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
257 RegKind MatchKind);
258 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
259 ParseStatus tryParseSVCR(OperandVector &Operands);
260 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
261 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
262 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
263 ParseStatus tryParseSysReg(OperandVector &Operands);
264 ParseStatus tryParseSysCROperand(OperandVector &Operands);
265 template <bool IsSVEPrefetch = false>
266 ParseStatus tryParsePrefetch(OperandVector &Operands);
267 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
268 ParseStatus tryParsePSBHint(OperandVector &Operands);
269 ParseStatus tryParseBTIHint(OperandVector &Operands);
270 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
271 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
272 ParseStatus tryParseAdrLabel(OperandVector &Operands);
273 template <bool AddFPZeroAsLiteral>
274 ParseStatus tryParseFPImm(OperandVector &Operands);
275 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
276 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
277 bool tryParseNeonVectorRegister(OperandVector &Operands);
278 ParseStatus tryParseVectorIndex(OperandVector &Operands);
279 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
280 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
281 template <bool ParseShiftExtend,
282 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
283 ParseStatus tryParseGPROperand(OperandVector &Operands);
284 ParseStatus tryParseZTOperand(OperandVector &Operands);
285 template <bool ParseShiftExtend, bool ParseSuffix>
286 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
287 template <RegKind RK>
288 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
290 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
291 template <RegKind VectorKind>
292 ParseStatus tryParseVectorList(OperandVector &Operands,
293 bool ExpectMatch = false);
294 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
295 ParseStatus tryParseSVEPattern(OperandVector &Operands);
296 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
297 ParseStatus tryParseGPR64x8(OperandVector &Operands);
298 ParseStatus tryParseImmRange(OperandVector &Operands);
299 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
300 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
301
302public:
303 enum AArch64MatchResultTy {
304 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
305#define GET_OPERAND_DIAGNOSTIC_TYPES
306#include "AArch64GenAsmMatcher.inc"
307 };
308 bool IsILP32;
309 bool IsWindowsArm64EC;
310
311 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
312 const MCInstrInfo &MII, const MCTargetOptions &Options)
313 : MCTargetAsmParser(Options, STI, MII) {
314 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
315 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
317 MCStreamer &S = getParser().getStreamer();
318 if (S.getTargetStreamer() == nullptr)
319 new AArch64TargetStreamer(S);
320
321 // Alias .hword/.word/.[dx]word to the target-independent
322 // .2byte/.4byte/.8byte directives as they have the same form and
323 // semantics:
324 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
325 Parser.addAliasForDirective(".hword", ".2byte");
326 Parser.addAliasForDirective(".word", ".4byte");
327 Parser.addAliasForDirective(".dword", ".8byte");
328 Parser.addAliasForDirective(".xword", ".8byte");
329
330 // Initialize the set of available features.
331 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
332 }
333
334 bool areEqualRegs(const MCParsedAsmOperand &Op1,
335 const MCParsedAsmOperand &Op2) const override;
336 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
337 SMLoc NameLoc, OperandVector &Operands) override;
338 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
339 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
340 SMLoc &EndLoc) override;
341 bool ParseDirective(AsmToken DirectiveID) override;
342 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
343 unsigned Kind) override;
344
345 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
346 AArch64::Specifier &DarwinSpec,
347 int64_t &Addend);
348};
349
350/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
351/// instruction.
352class AArch64Operand : public MCParsedAsmOperand {
353private:
354 enum KindTy {
355 k_Immediate,
356 k_ShiftedImm,
357 k_ImmRange,
358 k_CondCode,
359 k_Register,
360 k_MatrixRegister,
361 k_MatrixTileList,
362 k_SVCR,
363 k_VectorList,
364 k_VectorIndex,
365 k_Token,
366 k_SysReg,
367 k_SysCR,
368 k_Prefetch,
369 k_ShiftExtend,
370 k_FPImm,
371 k_Barrier,
372 k_PSBHint,
373 k_PHint,
374 k_BTIHint,
375 k_CMHPriorityHint,
376 } Kind;
377
378 SMLoc StartLoc, EndLoc;
379
380 struct TokOp {
381 const char *Data;
382 unsigned Length;
383 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
384 };
385
386 // Separate shift/extend operand.
387 struct ShiftExtendOp {
389 unsigned Amount;
390 bool HasExplicitAmount;
391 };
392
393 struct RegOp {
394 unsigned RegNum;
395 RegKind Kind;
396 int ElementWidth;
397
398 // The register may be allowed as a different register class,
399 // e.g. for GPR64as32 or GPR32as64.
400 RegConstraintEqualityTy EqualityTy;
401
402 // In some cases the shift/extend needs to be explicitly parsed together
403 // with the register, rather than as a separate operand. This is needed
404 // for addressing modes where the instruction as a whole dictates the
405 // scaling/extend, rather than specific bits in the instruction.
406 // By parsing them as a single operand, we avoid the need to pass an
407 // extra operand in all CodeGen patterns (because all operands need to
408 // have an associated value), and we avoid the need to update TableGen to
409 // accept operands that have no associated bits in the instruction.
410 //
411 // An added benefit of parsing them together is that the assembler
412 // can give a sensible diagnostic if the scaling is not correct.
413 //
414 // The default is 'lsl #0' (HasExplicitAmount = false) if no
415 // ShiftExtend is specified.
416 ShiftExtendOp ShiftExtend;
417 };
418
419 struct MatrixRegOp {
420 unsigned RegNum;
421 unsigned ElementWidth;
422 MatrixKind Kind;
423 };
424
425 struct MatrixTileListOp {
426 unsigned RegMask = 0;
427 };
428
429 struct VectorListOp {
430 unsigned RegNum;
431 unsigned Count;
432 unsigned Stride;
433 unsigned NumElements;
434 unsigned ElementWidth;
435 RegKind RegisterKind;
436 };
437
438 struct VectorIndexOp {
439 int Val;
440 };
441
442 struct ImmOp {
443 const MCExpr *Val;
444 };
445
446 struct ShiftedImmOp {
447 const MCExpr *Val;
448 unsigned ShiftAmount;
449 };
450
451 struct ImmRangeOp {
452 unsigned First;
453 unsigned Last;
454 };
455
456 struct CondCodeOp {
458 };
459
460 struct FPImmOp {
461 uint64_t Val; // APFloat value bitcasted to uint64_t.
462 bool IsExact; // describes whether parsed value was exact.
463 };
464
465 struct BarrierOp {
466 const char *Data;
467 unsigned Length;
468 unsigned Val; // Not the enum since not all values have names.
469 bool HasnXSModifier;
470 };
471
472 struct SysRegOp {
473 const char *Data;
474 unsigned Length;
475 uint32_t MRSReg;
476 uint32_t MSRReg;
477 uint32_t PStateField;
478 };
479
480 struct SysCRImmOp {
481 unsigned Val;
482 };
483
484 struct PrefetchOp {
485 const char *Data;
486 unsigned Length;
487 unsigned Val;
488 };
489
490 struct PSBHintOp {
491 const char *Data;
492 unsigned Length;
493 unsigned Val;
494 };
495 struct PHintOp {
496 const char *Data;
497 unsigned Length;
498 unsigned Val;
499 };
500 struct BTIHintOp {
501 const char *Data;
502 unsigned Length;
503 unsigned Val;
504 };
505 struct CMHPriorityHintOp {
506 const char *Data;
507 unsigned Length;
508 unsigned Val;
509 };
510
511 struct SVCROp {
512 const char *Data;
513 unsigned Length;
514 unsigned PStateField;
515 };
516
517 union {
518 struct TokOp Tok;
519 struct RegOp Reg;
520 struct MatrixRegOp MatrixReg;
521 struct MatrixTileListOp MatrixTileList;
522 struct VectorListOp VectorList;
523 struct VectorIndexOp VectorIndex;
524 struct ImmOp Imm;
525 struct ShiftedImmOp ShiftedImm;
526 struct ImmRangeOp ImmRange;
527 struct CondCodeOp CondCode;
528 struct FPImmOp FPImm;
529 struct BarrierOp Barrier;
530 struct SysRegOp SysReg;
531 struct SysCRImmOp SysCRImm;
532 struct PrefetchOp Prefetch;
533 struct PSBHintOp PSBHint;
534 struct PHintOp PHint;
535 struct BTIHintOp BTIHint;
536 struct CMHPriorityHintOp CMHPriorityHint;
537 struct ShiftExtendOp ShiftExtend;
538 struct SVCROp SVCR;
539 };
540
541 // Keep the MCContext around as the MCExprs may need manipulated during
542 // the add<>Operands() calls.
543 MCContext &Ctx;
544
545public:
546 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
547
548 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
549 Kind = o.Kind;
550 StartLoc = o.StartLoc;
551 EndLoc = o.EndLoc;
552 switch (Kind) {
553 case k_Token:
554 Tok = o.Tok;
555 break;
556 case k_Immediate:
557 Imm = o.Imm;
558 break;
559 case k_ShiftedImm:
560 ShiftedImm = o.ShiftedImm;
561 break;
562 case k_ImmRange:
563 ImmRange = o.ImmRange;
564 break;
565 case k_CondCode:
566 CondCode = o.CondCode;
567 break;
568 case k_FPImm:
569 FPImm = o.FPImm;
570 break;
571 case k_Barrier:
572 Barrier = o.Barrier;
573 break;
574 case k_Register:
575 Reg = o.Reg;
576 break;
577 case k_MatrixRegister:
578 MatrixReg = o.MatrixReg;
579 break;
580 case k_MatrixTileList:
581 MatrixTileList = o.MatrixTileList;
582 break;
583 case k_VectorList:
584 VectorList = o.VectorList;
585 break;
586 case k_VectorIndex:
587 VectorIndex = o.VectorIndex;
588 break;
589 case k_SysReg:
590 SysReg = o.SysReg;
591 break;
592 case k_SysCR:
593 SysCRImm = o.SysCRImm;
594 break;
595 case k_Prefetch:
596 Prefetch = o.Prefetch;
597 break;
598 case k_PSBHint:
599 PSBHint = o.PSBHint;
600 break;
601 case k_PHint:
602 PHint = o.PHint;
603 break;
604 case k_BTIHint:
605 BTIHint = o.BTIHint;
606 break;
607 case k_CMHPriorityHint:
608 CMHPriorityHint = o.CMHPriorityHint;
609 break;
610 case k_ShiftExtend:
611 ShiftExtend = o.ShiftExtend;
612 break;
613 case k_SVCR:
614 SVCR = o.SVCR;
615 break;
616 }
617 }
618
619 /// getStartLoc - Get the location of the first token of this operand.
620 SMLoc getStartLoc() const override { return StartLoc; }
621 /// getEndLoc - Get the location of the last token of this operand.
622 SMLoc getEndLoc() const override { return EndLoc; }
623
624 StringRef getToken() const {
625 assert(Kind == k_Token && "Invalid access!");
626 return StringRef(Tok.Data, Tok.Length);
627 }
628
629 bool isTokenSuffix() const {
630 assert(Kind == k_Token && "Invalid access!");
631 return Tok.IsSuffix;
632 }
633
634 const MCExpr *getImm() const {
635 assert(Kind == k_Immediate && "Invalid access!");
636 return Imm.Val;
637 }
638
639 const MCExpr *getShiftedImmVal() const {
640 assert(Kind == k_ShiftedImm && "Invalid access!");
641 return ShiftedImm.Val;
642 }
643
644 unsigned getShiftedImmShift() const {
645 assert(Kind == k_ShiftedImm && "Invalid access!");
646 return ShiftedImm.ShiftAmount;
647 }
648
649 unsigned getFirstImmVal() const {
650 assert(Kind == k_ImmRange && "Invalid access!");
651 return ImmRange.First;
652 }
653
654 unsigned getLastImmVal() const {
655 assert(Kind == k_ImmRange && "Invalid access!");
656 return ImmRange.Last;
657 }
658
660 assert(Kind == k_CondCode && "Invalid access!");
661 return CondCode.Code;
662 }
663
664 APFloat getFPImm() const {
665 assert (Kind == k_FPImm && "Invalid access!");
666 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
667 }
668
669 bool getFPImmIsExact() const {
670 assert (Kind == k_FPImm && "Invalid access!");
671 return FPImm.IsExact;
672 }
673
674 unsigned getBarrier() const {
675 assert(Kind == k_Barrier && "Invalid access!");
676 return Barrier.Val;
677 }
678
679 StringRef getBarrierName() const {
680 assert(Kind == k_Barrier && "Invalid access!");
681 return StringRef(Barrier.Data, Barrier.Length);
682 }
683
684 bool getBarriernXSModifier() const {
685 assert(Kind == k_Barrier && "Invalid access!");
686 return Barrier.HasnXSModifier;
687 }
688
689 MCRegister getReg() const override {
690 assert(Kind == k_Register && "Invalid access!");
691 return Reg.RegNum;
692 }
693
694 unsigned getMatrixReg() const {
695 assert(Kind == k_MatrixRegister && "Invalid access!");
696 return MatrixReg.RegNum;
697 }
698
699 unsigned getMatrixElementWidth() const {
700 assert(Kind == k_MatrixRegister && "Invalid access!");
701 return MatrixReg.ElementWidth;
702 }
703
704 MatrixKind getMatrixKind() const {
705 assert(Kind == k_MatrixRegister && "Invalid access!");
706 return MatrixReg.Kind;
707 }
708
709 unsigned getMatrixTileListRegMask() const {
710 assert(isMatrixTileList() && "Invalid access!");
711 return MatrixTileList.RegMask;
712 }
713
714 RegConstraintEqualityTy getRegEqualityTy() const {
715 assert(Kind == k_Register && "Invalid access!");
716 return Reg.EqualityTy;
717 }
718
719 unsigned getVectorListStart() const {
720 assert(Kind == k_VectorList && "Invalid access!");
721 return VectorList.RegNum;
722 }
723
724 unsigned getVectorListCount() const {
725 assert(Kind == k_VectorList && "Invalid access!");
726 return VectorList.Count;
727 }
728
729 unsigned getVectorListStride() const {
730 assert(Kind == k_VectorList && "Invalid access!");
731 return VectorList.Stride;
732 }
733
734 int getVectorIndex() const {
735 assert(Kind == k_VectorIndex && "Invalid access!");
736 return VectorIndex.Val;
737 }
738
739 StringRef getSysReg() const {
740 assert(Kind == k_SysReg && "Invalid access!");
741 return StringRef(SysReg.Data, SysReg.Length);
742 }
743
744 unsigned getSysCR() const {
745 assert(Kind == k_SysCR && "Invalid access!");
746 return SysCRImm.Val;
747 }
748
749 unsigned getPrefetch() const {
750 assert(Kind == k_Prefetch && "Invalid access!");
751 return Prefetch.Val;
752 }
753
754 unsigned getPSBHint() const {
755 assert(Kind == k_PSBHint && "Invalid access!");
756 return PSBHint.Val;
757 }
758
759 unsigned getPHint() const {
760 assert(Kind == k_PHint && "Invalid access!");
761 return PHint.Val;
762 }
763
764 StringRef getPSBHintName() const {
765 assert(Kind == k_PSBHint && "Invalid access!");
766 return StringRef(PSBHint.Data, PSBHint.Length);
767 }
768
769 StringRef getPHintName() const {
770 assert(Kind == k_PHint && "Invalid access!");
771 return StringRef(PHint.Data, PHint.Length);
772 }
773
774 unsigned getBTIHint() const {
775 assert(Kind == k_BTIHint && "Invalid access!");
776 return BTIHint.Val;
777 }
778
779 StringRef getBTIHintName() const {
780 assert(Kind == k_BTIHint && "Invalid access!");
781 return StringRef(BTIHint.Data, BTIHint.Length);
782 }
783
784 unsigned getCMHPriorityHint() const {
785 assert(Kind == k_CMHPriorityHint && "Invalid access!");
786 return CMHPriorityHint.Val;
787 }
788
789 StringRef getCMHPriorityHintName() const {
790 assert(Kind == k_CMHPriorityHint && "Invalid access!");
791 return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
792 }
793
794 StringRef getSVCR() const {
795 assert(Kind == k_SVCR && "Invalid access!");
796 return StringRef(SVCR.Data, SVCR.Length);
797 }
798
799 StringRef getPrefetchName() const {
800 assert(Kind == k_Prefetch && "Invalid access!");
801 return StringRef(Prefetch.Data, Prefetch.Length);
802 }
803
804 AArch64_AM::ShiftExtendType getShiftExtendType() const {
805 if (Kind == k_ShiftExtend)
806 return ShiftExtend.Type;
807 if (Kind == k_Register)
808 return Reg.ShiftExtend.Type;
809 llvm_unreachable("Invalid access!");
810 }
811
812 unsigned getShiftExtendAmount() const {
813 if (Kind == k_ShiftExtend)
814 return ShiftExtend.Amount;
815 if (Kind == k_Register)
816 return Reg.ShiftExtend.Amount;
817 llvm_unreachable("Invalid access!");
818 }
819
820 bool hasShiftExtendAmount() const {
821 if (Kind == k_ShiftExtend)
822 return ShiftExtend.HasExplicitAmount;
823 if (Kind == k_Register)
824 return Reg.ShiftExtend.HasExplicitAmount;
825 llvm_unreachable("Invalid access!");
826 }
827
828 bool isImm() const override { return Kind == k_Immediate; }
829 bool isMem() const override { return false; }
830
831 bool isUImm6() const {
832 if (!isImm())
833 return false;
834 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
835 if (!MCE)
836 return false;
837 int64_t Val = MCE->getValue();
838 return (Val >= 0 && Val < 64);
839 }
840
841 template <int Width> bool isSImm() const {
842 return bool(isSImmScaled<Width, 1>());
843 }
844
845 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
846 return isImmScaled<Bits, Scale>(true);
847 }
848
849 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
850 DiagnosticPredicate isUImmScaled() const {
851 if (IsRange && isImmRange() &&
852 (getLastImmVal() != getFirstImmVal() + Offset))
854
855 return isImmScaled<Bits, Scale, IsRange>(false);
856 }
857
858 template <int Bits, int Scale, bool IsRange = false>
859 DiagnosticPredicate isImmScaled(bool Signed) const {
860 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
861 (isImmRange() && !IsRange))
863
864 int64_t Val;
865 if (isImmRange())
866 Val = getFirstImmVal();
867 else {
868 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
869 if (!MCE)
871 Val = MCE->getValue();
872 }
873
874 int64_t MinVal, MaxVal;
875 if (Signed) {
876 int64_t Shift = Bits - 1;
877 MinVal = (int64_t(1) << Shift) * -Scale;
878 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
879 } else {
880 MinVal = 0;
881 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
882 }
883
884 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
886
888 }
889
890 DiagnosticPredicate isSVEPattern() const {
891 if (!isImm())
893 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
894 if (!MCE)
896 int64_t Val = MCE->getValue();
897 if (Val >= 0 && Val < 32)
900 }
901
902 DiagnosticPredicate isSVEVecLenSpecifier() const {
903 if (!isImm())
905 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
906 if (!MCE)
908 int64_t Val = MCE->getValue();
909 if (Val >= 0 && Val <= 1)
912 }
913
914 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
915 AArch64::Specifier ELFSpec;
916 AArch64::Specifier DarwinSpec;
917 int64_t Addend;
918 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
919 Addend)) {
920 // If we don't understand the expression, assume the best and
921 // let the fixup and relocation code deal with it.
922 return true;
923 }
924
925 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
933 ELFSpec)) {
934 // Note that we don't range-check the addend. It's adjusted modulo page
935 // size when converted, so there is no "out of range" condition when using
936 // @pageoff.
937 return true;
938 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
939 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
940 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
941 return Addend == 0;
942 }
943
944 return false;
945 }
946
947 template <int Scale> bool isUImm12Offset() const {
948 if (!isImm())
949 return false;
950
951 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
952 if (!MCE)
953 return isSymbolicUImm12Offset(getImm());
954
955 int64_t Val = MCE->getValue();
956 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
957 }
958
959 template <int N, int M>
960 bool isImmInRange() const {
961 if (!isImm())
962 return false;
963 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
964 if (!MCE)
965 return false;
966 int64_t Val = MCE->getValue();
967 return (Val >= N && Val <= M);
968 }
969
970 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
971 // a logical immediate can always be represented when inverted.
972 template <typename T>
973 bool isLogicalImm() const {
974 if (!isImm())
975 return false;
976 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
977 if (!MCE)
978 return false;
979
980 int64_t Val = MCE->getValue();
981 // Avoid left shift by 64 directly.
982 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
983 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
984 if ((Val & Upper) && (Val & Upper) != Upper)
985 return false;
986
987 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
988 }
989
990 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
991
992 bool isImmRange() const { return Kind == k_ImmRange; }
993
994 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
995 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
996 /// immediate that can be shifted by 'Shift'.
997 template <unsigned Width>
998 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
999 if (isShiftedImm() && Width == getShiftedImmShift())
1000 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
1001 return std::make_pair(CE->getValue(), Width);
1002
1003 if (isImm())
1004 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
1005 int64_t Val = CE->getValue();
1006 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
1007 return std::make_pair(Val >> Width, Width);
1008 else
1009 return std::make_pair(Val, 0u);
1010 }
1011
1012 return {};
1013 }
1014
1015 bool isAddSubImm() const {
1016 if (!isShiftedImm() && !isImm())
1017 return false;
1018
1019 const MCExpr *Expr;
1020
1021 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
1022 if (isShiftedImm()) {
1023 unsigned Shift = ShiftedImm.ShiftAmount;
1024 Expr = ShiftedImm.Val;
1025 if (Shift != 0 && Shift != 12)
1026 return false;
1027 } else {
1028 Expr = getImm();
1029 }
1030
1031 AArch64::Specifier ELFSpec;
1032 AArch64::Specifier DarwinSpec;
1033 int64_t Addend;
1034 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1035 Addend)) {
1036 return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
1037 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
1038 (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
1046 ELFSpec);
1047 }
1048
1049 // If it's a constant, it should be a real immediate in range.
1050 if (auto ShiftedVal = getShiftedVal<12>())
1051 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1052
1053 // If it's an expression, we hope for the best and let the fixup/relocation
1054 // code deal with it.
1055 return true;
1056 }
1057
1058 bool isAddSubImmNeg() const {
1059 if (!isShiftedImm() && !isImm())
1060 return false;
1061
1062 // Otherwise it should be a real negative immediate in range.
1063 if (auto ShiftedVal = getShiftedVal<12>())
1064 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1065
1066 return false;
1067 }
1068
1069 // Signed value in the range -128 to +127. For element widths of
1070 // 16 bits or higher it may also be a signed multiple of 256 in the
1071 // range -32768 to +32512.
1072 // For element-width of 8 bits a range of -128 to 255 is accepted,
1073 // since a copy of a byte can be either signed/unsigned.
1074 template <typename T>
1075 DiagnosticPredicate isSVECpyImm() const {
1076 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1078
1079 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1080 std::is_same<int8_t, T>::value;
1081 if (auto ShiftedImm = getShiftedVal<8>())
1082 if (!(IsByte && ShiftedImm->second) &&
1083 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1084 << ShiftedImm->second))
1086
1088 }
1089
1090 // Unsigned value in the range 0 to 255. For element widths of
1091 // 16 bits or higher it may also be a signed multiple of 256 in the
1092 // range 0 to 65280.
1093 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1094 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1096
1097 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1098 std::is_same<int8_t, T>::value;
1099 if (auto ShiftedImm = getShiftedVal<8>())
1100 if (!(IsByte && ShiftedImm->second) &&
1101 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1102 << ShiftedImm->second))
1104
1106 }
1107
1108 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1109 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1112 }
1113
1114 bool isCondCode() const { return Kind == k_CondCode; }
1115
1116 bool isSIMDImmType10() const {
1117 if (!isImm())
1118 return false;
1119 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1120 if (!MCE)
1121 return false;
1123 }
1124
1125 template<int N>
1126 bool isBranchTarget() const {
1127 if (!isImm())
1128 return false;
1129 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1130 if (!MCE)
1131 return true;
1132 int64_t Val = MCE->getValue();
1133 if (Val & 0x3)
1134 return false;
1135 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1136 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1137 }
1138
1139 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1140 if (!isImm())
1141 return false;
1142
1143 AArch64::Specifier ELFSpec;
1144 AArch64::Specifier DarwinSpec;
1145 int64_t Addend;
1146 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1147 Addend)) {
1148 return false;
1149 }
1150 if (DarwinSpec != AArch64::S_None)
1151 return false;
1152
1153 return llvm::is_contained(AllowedModifiers, ELFSpec);
1154 }
1155
1156 bool isMovWSymbolG3() const {
1157 return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1158 }
1159
1160 bool isMovWSymbolG2() const {
1161 return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1165 }
1166
1167 bool isMovWSymbolG1() const {
1168 return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1173 }
1174
1175 bool isMovWSymbolG0() const {
1176 return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1181 }
1182
1183 template<int RegWidth, int Shift>
1184 bool isMOVZMovAlias() const {
1185 if (!isImm()) return false;
1186
1187 const MCExpr *E = getImm();
1188 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1189 uint64_t Value = CE->getValue();
1190
1191 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1192 }
1193 // Only supports the case of Shift being 0 if an expression is used as an
1194 // operand
1195 return !Shift && E;
1196 }
1197
1198 template<int RegWidth, int Shift>
1199 bool isMOVNMovAlias() const {
1200 if (!isImm()) return false;
1201
1202 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1203 if (!CE) return false;
1204 uint64_t Value = CE->getValue();
1205
1206 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1207 }
1208
1209 bool isFPImm() const {
1210 return Kind == k_FPImm &&
1211 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1212 }
1213
1214 bool isBarrier() const {
1215 return Kind == k_Barrier && !getBarriernXSModifier();
1216 }
1217 bool isBarriernXS() const {
1218 return Kind == k_Barrier && getBarriernXSModifier();
1219 }
1220 bool isSysReg() const { return Kind == k_SysReg; }
1221
1222 bool isMRSSystemRegister() const {
1223 if (!isSysReg()) return false;
1224
1225 return SysReg.MRSReg != -1U;
1226 }
1227
1228 bool isMSRSystemRegister() const {
1229 if (!isSysReg()) return false;
1230 return SysReg.MSRReg != -1U;
1231 }
1232
1233 bool isSystemPStateFieldWithImm0_1() const {
1234 if (!isSysReg()) return false;
1235 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1236 }
1237
1238 bool isSystemPStateFieldWithImm0_15() const {
1239 if (!isSysReg())
1240 return false;
1241 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1242 }
1243
1244 bool isSVCR() const {
1245 if (Kind != k_SVCR)
1246 return false;
1247 return SVCR.PStateField != -1U;
1248 }
1249
1250 bool isReg() const override {
1251 return Kind == k_Register;
1252 }
1253
1254 bool isVectorList() const { return Kind == k_VectorList; }
1255
1256 bool isScalarReg() const {
1257 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1258 }
1259
1260 bool isNeonVectorReg() const {
1261 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1262 }
1263
1264 bool isNeonVectorRegLo() const {
1265 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1266 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1267 Reg.RegNum) ||
1268 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1269 Reg.RegNum));
1270 }
1271
1272 bool isNeonVectorReg0to7() const {
1273 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1274 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1275 Reg.RegNum));
1276 }
1277
1278 bool isMatrix() const { return Kind == k_MatrixRegister; }
1279 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1280
1281 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1282 RegKind RK;
1283 switch (Class) {
1284 case AArch64::PPRRegClassID:
1285 case AArch64::PPR_3bRegClassID:
1286 case AArch64::PPR_p8to15RegClassID:
1287 case AArch64::PNRRegClassID:
1288 case AArch64::PNR_p8to15RegClassID:
1289 case AArch64::PPRorPNRRegClassID:
1290 RK = RegKind::SVEPredicateAsCounter;
1291 break;
1292 default:
1293 llvm_unreachable("Unsupported register class");
1294 }
1295
1296 return (Kind == k_Register && Reg.Kind == RK) &&
1297 AArch64MCRegisterClasses[Class].contains(getReg());
1298 }
1299
1300 template <unsigned Class> bool isSVEVectorReg() const {
1301 RegKind RK;
1302 switch (Class) {
1303 case AArch64::ZPRRegClassID:
1304 case AArch64::ZPR_3bRegClassID:
1305 case AArch64::ZPR_4bRegClassID:
1306 case AArch64::ZPRMul2_LoRegClassID:
1307 case AArch64::ZPRMul2_HiRegClassID:
1308 case AArch64::ZPR_KRegClassID:
1309 RK = RegKind::SVEDataVector;
1310 break;
1311 case AArch64::PPRRegClassID:
1312 case AArch64::PPR_3bRegClassID:
1313 case AArch64::PPR_p8to15RegClassID:
1314 case AArch64::PNRRegClassID:
1315 case AArch64::PNR_p8to15RegClassID:
1316 case AArch64::PPRorPNRRegClassID:
1317 RK = RegKind::SVEPredicateVector;
1318 break;
1319 default:
1320 llvm_unreachable("Unsupported register class");
1321 }
1322
1323 return (Kind == k_Register && Reg.Kind == RK) &&
1324 AArch64MCRegisterClasses[Class].contains(getReg());
1325 }
1326
1327 template <unsigned Class> bool isFPRasZPR() const {
1328 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1329 AArch64MCRegisterClasses[Class].contains(getReg());
1330 }
1331
1332 template <int ElementWidth, unsigned Class>
1333 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1334 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1336
1337 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1339
1341 }
1342
1343 template <int ElementWidth, unsigned Class>
1344 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1345 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1346 Reg.Kind != RegKind::SVEPredicateVector))
1348
1349 if ((isSVEPredicateAsCounterReg<Class>() ||
1350 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1351 Reg.ElementWidth == ElementWidth)
1353
1355 }
1356
1357 template <int ElementWidth, unsigned Class>
1358 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1359 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1361
1362 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1364
1366 }
1367
1368 template <int ElementWidth, unsigned Class>
1369 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1370 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1372
1373 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1375
1377 }
1378
1379 template <int ElementWidth, unsigned Class,
1380 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1381 bool ShiftWidthAlwaysSame>
1382 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1383 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1384 if (!VectorMatch.isMatch())
1386
1387 // Give a more specific diagnostic when the user has explicitly typed in
1388 // a shift-amount that does not match what is expected, but for which
1389 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1390 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1391 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1392 ShiftExtendTy == AArch64_AM::SXTW) &&
1393 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1395
1396 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1398
1400 }
1401
1402 bool isGPR32as64() const {
1403 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1404 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1405 }
1406
1407 bool isGPR64as32() const {
1408 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1409 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1410 }
1411
1412 bool isGPR64x8() const {
1413 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1414 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1415 Reg.RegNum);
1416 }
1417
1418 bool isWSeqPair() const {
1419 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1420 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1421 Reg.RegNum);
1422 }
1423
1424 bool isXSeqPair() const {
1425 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1426 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1427 Reg.RegNum);
1428 }
1429
1430 bool isSyspXzrPair() const {
1431 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1432 }
1433
1434 template<int64_t Angle, int64_t Remainder>
1435 DiagnosticPredicate isComplexRotation() const {
1436 if (!isImm())
1438
1439 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1440 if (!CE)
1442 uint64_t Value = CE->getValue();
1443
1444 if (Value % Angle == Remainder && Value <= 270)
1447 }
1448
1449 template <unsigned RegClassID> bool isGPR64() const {
1450 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1451 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1452 }
1453
1454 template <unsigned RegClassID, int ExtWidth>
1455 DiagnosticPredicate isGPR64WithShiftExtend() const {
1456 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1458
1459 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1460 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1463 }
1464
1465 /// Is this a vector list with the type implicit (presumably attached to the
1466 /// instruction itself)?
1467 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1468 bool isImplicitlyTypedVectorList() const {
1469 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1470 VectorList.NumElements == 0 &&
1471 VectorList.RegisterKind == VectorKind &&
1472 (!IsConsecutive || (VectorList.Stride == 1));
1473 }
1474
1475 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1476 unsigned ElementWidth, unsigned Stride = 1>
1477 bool isTypedVectorList() const {
1478 if (Kind != k_VectorList)
1479 return false;
1480 if (VectorList.Count != NumRegs)
1481 return false;
1482 if (VectorList.RegisterKind != VectorKind)
1483 return false;
1484 if (VectorList.ElementWidth != ElementWidth)
1485 return false;
1486 if (VectorList.Stride != Stride)
1487 return false;
1488 return VectorList.NumElements == NumElements;
1489 }
1490
1491 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1492 unsigned ElementWidth, unsigned RegClass>
1493 DiagnosticPredicate isTypedVectorListMultiple() const {
1494 bool Res =
1495 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1496 if (!Res)
1498 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1501 }
1502
1503 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1504 unsigned ElementWidth>
1505 DiagnosticPredicate isTypedVectorListStrided() const {
1506 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1507 ElementWidth, Stride>();
1508 if (!Res)
1510 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1511 ((VectorList.RegNum >= AArch64::Z16) &&
1512 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1515 }
1516
1517 template <int Min, int Max>
1518 DiagnosticPredicate isVectorIndex() const {
1519 if (Kind != k_VectorIndex)
1521 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1524 }
1525
1526 bool isToken() const override { return Kind == k_Token; }
1527
1528 bool isTokenEqual(StringRef Str) const {
1529 return Kind == k_Token && getToken() == Str;
1530 }
1531 bool isSysCR() const { return Kind == k_SysCR; }
1532 bool isPrefetch() const { return Kind == k_Prefetch; }
1533 bool isPSBHint() const { return Kind == k_PSBHint; }
1534 bool isPHint() const { return Kind == k_PHint; }
1535 bool isBTIHint() const { return Kind == k_BTIHint; }
1536 bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
1537 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1538 bool isShifter() const {
1539 if (!isShiftExtend())
1540 return false;
1541
1542 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1543 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1544 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1545 ST == AArch64_AM::MSL);
1546 }
1547
1548 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1549 if (Kind != k_FPImm)
1551
1552 if (getFPImmIsExact()) {
1553 // Lookup the immediate from table of supported immediates.
1554 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1555 assert(Desc && "Unknown enum value");
1556
1557 // Calculate its FP value.
1558 APFloat RealVal(APFloat::IEEEdouble());
1559 auto StatusOrErr =
1560 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1561 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1562 llvm_unreachable("FP immediate is not exact");
1563
1564 if (getFPImm().bitwiseIsEqual(RealVal))
1566 }
1567
1569 }
1570
1571 template <unsigned ImmA, unsigned ImmB>
1572 DiagnosticPredicate isExactFPImm() const {
1573 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1574 if ((Res = isExactFPImm<ImmA>()))
1576 if ((Res = isExactFPImm<ImmB>()))
1578 return Res;
1579 }
1580
1581 bool isExtend() const {
1582 if (!isShiftExtend())
1583 return false;
1584
1585 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1586 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1587 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1588 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1589 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1590 ET == AArch64_AM::LSL) &&
1591 getShiftExtendAmount() <= 4;
1592 }
1593
1594 bool isExtend64() const {
1595 if (!isExtend())
1596 return false;
1597 // Make sure the extend expects a 32-bit source register.
1598 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1599 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1600 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1601 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1602 }
1603
1604 bool isExtendLSL64() const {
1605 if (!isExtend())
1606 return false;
1607 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1608 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1609 ET == AArch64_AM::LSL) &&
1610 getShiftExtendAmount() <= 4;
1611 }
1612
1613 bool isLSLImm3Shift() const {
1614 if (!isShiftExtend())
1615 return false;
1616 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1617 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1618 }
1619
1620 template<int Width> bool isMemXExtend() const {
1621 if (!isExtend())
1622 return false;
1623 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1624 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1625 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1626 getShiftExtendAmount() == 0);
1627 }
1628
1629 template<int Width> bool isMemWExtend() const {
1630 if (!isExtend())
1631 return false;
1632 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1633 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1634 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1635 getShiftExtendAmount() == 0);
1636 }
1637
1638 template <unsigned width>
1639 bool isArithmeticShifter() const {
1640 if (!isShifter())
1641 return false;
1642
1643 // An arithmetic shifter is LSL, LSR, or ASR.
1644 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1645 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1646 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1647 }
1648
1649 template <unsigned width>
1650 bool isLogicalShifter() const {
1651 if (!isShifter())
1652 return false;
1653
1654 // A logical shifter is LSL, LSR, ASR or ROR.
1655 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1656 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1657 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1658 getShiftExtendAmount() < width;
1659 }
1660
1661 bool isMovImm32Shifter() const {
1662 if (!isShifter())
1663 return false;
1664
1665 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1666 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1667 if (ST != AArch64_AM::LSL)
1668 return false;
1669 uint64_t Val = getShiftExtendAmount();
1670 return (Val == 0 || Val == 16);
1671 }
1672
1673 bool isMovImm64Shifter() const {
1674 if (!isShifter())
1675 return false;
1676
1677 // A MOVi shifter is LSL of 0 or 16.
1678 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1679 if (ST != AArch64_AM::LSL)
1680 return false;
1681 uint64_t Val = getShiftExtendAmount();
1682 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1683 }
1684
1685 bool isLogicalVecShifter() const {
1686 if (!isShifter())
1687 return false;
1688
1689 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1690 unsigned Shift = getShiftExtendAmount();
1691 return getShiftExtendType() == AArch64_AM::LSL &&
1692 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1693 }
1694
1695 bool isLogicalVecHalfWordShifter() const {
1696 if (!isLogicalVecShifter())
1697 return false;
1698
1699 // A logical vector shifter is a left shift by 0 or 8.
1700 unsigned Shift = getShiftExtendAmount();
1701 return getShiftExtendType() == AArch64_AM::LSL &&
1702 (Shift == 0 || Shift == 8);
1703 }
1704
1705 bool isMoveVecShifter() const {
1706 if (!isShiftExtend())
1707 return false;
1708
1709 // A logical vector shifter is a left shift by 8 or 16.
1710 unsigned Shift = getShiftExtendAmount();
1711 return getShiftExtendType() == AArch64_AM::MSL &&
1712 (Shift == 8 || Shift == 16);
1713 }
1714
1715 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1716 // to LDUR/STUR when the offset is not legal for the former but is for
1717 // the latter. As such, in addition to checking for being a legal unscaled
1718 // address, also check that it is not a legal scaled address. This avoids
1719 // ambiguity in the matcher.
1720 template<int Width>
1721 bool isSImm9OffsetFB() const {
1722 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1723 }
1724
1725 bool isAdrpLabel() const {
1726 // Validation was handled during parsing, so we just verify that
1727 // something didn't go haywire.
1728 if (!isImm())
1729 return false;
1730
1731 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1732 int64_t Val = CE->getValue();
1733 int64_t Min = - (4096 * (1LL << (21 - 1)));
1734 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1735 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1736 }
1737
1738 return true;
1739 }
1740
1741 bool isAdrLabel() const {
1742 // Validation was handled during parsing, so we just verify that
1743 // something didn't go haywire.
1744 if (!isImm())
1745 return false;
1746
1747 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1748 int64_t Val = CE->getValue();
1749 int64_t Min = - (1LL << (21 - 1));
1750 int64_t Max = ((1LL << (21 - 1)) - 1);
1751 return Val >= Min && Val <= Max;
1752 }
1753
1754 return true;
1755 }
1756
1757 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1758 DiagnosticPredicate isMatrixRegOperand() const {
1759 if (!isMatrix())
1761 if (getMatrixKind() != Kind ||
1762 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1763 EltSize != getMatrixElementWidth())
1766 }
1767
1768 bool isPAuthPCRelLabel16Operand() const {
1769 // PAuth PCRel16 operands are similar to regular branch targets, but only
1770 // negative values are allowed for concrete immediates as signing instr
1771 // should be in a lower address.
1772 if (!isImm())
1773 return false;
1774 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1775 if (!MCE)
1776 return true;
1777 int64_t Val = MCE->getValue();
1778 if (Val & 0b11)
1779 return false;
1780 return (Val <= 0) && (Val > -(1 << 18));
1781 }
1782
1783 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1784 // Add as immediates when possible. Null MCExpr = 0.
1785 if (!Expr)
1787 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1788 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1789 else
1791 }
1792
1793 void addRegOperands(MCInst &Inst, unsigned N) const {
1794 assert(N == 1 && "Invalid number of operands!");
1796 }
1797
1798 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1799 assert(N == 1 && "Invalid number of operands!");
1800 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1801 }
1802
1803 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1804 assert(N == 1 && "Invalid number of operands!");
1805 assert(
1806 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1807
1808 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1809 MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
1811
1813 }
1814
1815 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1816 assert(N == 1 && "Invalid number of operands!");
1817 assert(
1818 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1819
1820 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1821 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
1823
1825 }
1826
1827 template <int Width>
1828 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1829 unsigned Base;
1830 switch (Width) {
1831 case 8: Base = AArch64::B0; break;
1832 case 16: Base = AArch64::H0; break;
1833 case 32: Base = AArch64::S0; break;
1834 case 64: Base = AArch64::D0; break;
1835 case 128: Base = AArch64::Q0; break;
1836 default:
1837 llvm_unreachable("Unsupported width");
1838 }
1839 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1840 }
1841
1842 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1843 assert(N == 1 && "Invalid number of operands!");
1844 unsigned Reg = getReg();
1845 // Normalise to PPR
1846 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1847 Reg = Reg - AArch64::PN0 + AArch64::P0;
1849 }
1850
1851 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1852 assert(N == 1 && "Invalid number of operands!");
1853 Inst.addOperand(
1854 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1855 }
1856
1857 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1858 assert(N == 1 && "Invalid number of operands!");
1859 assert(
1860 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1861 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1862 }
1863
1864 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1865 assert(N == 1 && "Invalid number of operands!");
1866 assert(
1867 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1869 }
1870
1871 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1872 assert(N == 1 && "Invalid number of operands!");
1874 }
1875
1876 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1877 assert(N == 1 && "Invalid number of operands!");
1879 }
1880
1881 enum VecListIndexType {
1882 VecListIdx_DReg = 0,
1883 VecListIdx_QReg = 1,
1884 VecListIdx_ZReg = 2,
1885 VecListIdx_PReg = 3,
1886 };
1887
1888 template <VecListIndexType RegTy, unsigned NumRegs,
1889 bool IsConsecutive = false>
1890 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1891 assert(N == 1 && "Invalid number of operands!");
1892 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1893 "Expected consecutive registers");
1894 static const unsigned FirstRegs[][5] = {
1895 /* DReg */ { AArch64::Q0,
1896 AArch64::D0, AArch64::D0_D1,
1897 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1898 /* QReg */ { AArch64::Q0,
1899 AArch64::Q0, AArch64::Q0_Q1,
1900 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1901 /* ZReg */ { AArch64::Z0,
1902 AArch64::Z0, AArch64::Z0_Z1,
1903 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1904 /* PReg */ { AArch64::P0,
1905 AArch64::P0, AArch64::P0_P1 }
1906 };
1907
1908 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1909 " NumRegs must be <= 4 for ZRegs");
1910
1911 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1912 " NumRegs must be <= 2 for PRegs");
1913
1914 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1915 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1916 FirstRegs[(unsigned)RegTy][0]));
1917 }
1918
1919 template <unsigned NumRegs>
1920 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1921 assert(N == 1 && "Invalid number of operands!");
1922 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1923
1924 switch (NumRegs) {
1925 case 2:
1926 if (getVectorListStart() < AArch64::Z16) {
1927 assert((getVectorListStart() < AArch64::Z8) &&
1928 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1930 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1931 } else {
1932 assert((getVectorListStart() < AArch64::Z24) &&
1933 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1935 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1936 }
1937 break;
1938 case 4:
1939 if (getVectorListStart() < AArch64::Z16) {
1940 assert((getVectorListStart() < AArch64::Z4) &&
1941 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1943 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1944 } else {
1945 assert((getVectorListStart() < AArch64::Z20) &&
1946 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1948 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1949 }
1950 break;
1951 default:
1952 llvm_unreachable("Unsupported number of registers for strided vec list");
1953 }
1954 }
1955
1956 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!");
1958 unsigned RegMask = getMatrixTileListRegMask();
1959 assert(RegMask <= 0xFF && "Invalid mask!");
1960 Inst.addOperand(MCOperand::createImm(RegMask));
1961 }
1962
1963 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1964 assert(N == 1 && "Invalid number of operands!");
1965 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1966 }
1967
1968 template <unsigned ImmIs0, unsigned ImmIs1>
1969 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1970 assert(N == 1 && "Invalid number of operands!");
1971 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1972 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1973 }
1974
1975 void addImmOperands(MCInst &Inst, unsigned N) const {
1976 assert(N == 1 && "Invalid number of operands!");
1977 // If this is a pageoff symrefexpr with an addend, adjust the addend
1978 // to be only the page-offset portion. Otherwise, just add the expr
1979 // as-is.
1980 addExpr(Inst, getImm());
1981 }
1982
1983 template <int Shift>
1984 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1985 assert(N == 2 && "Invalid number of operands!");
1986 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1987 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1988 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1989 } else if (isShiftedImm()) {
1990 addExpr(Inst, getShiftedImmVal());
1991 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1992 } else {
1993 addExpr(Inst, getImm());
1995 }
1996 }
1997
1998 template <int Shift>
1999 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2000 assert(N == 2 && "Invalid number of operands!");
2001 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2002 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2003 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2004 } else
2005 llvm_unreachable("Not a shifted negative immediate");
2006 }
2007
2008 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2009 assert(N == 1 && "Invalid number of operands!");
2011 }
2012
2013 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2014 assert(N == 1 && "Invalid number of operands!");
2015 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2016 if (!MCE)
2017 addExpr(Inst, getImm());
2018 else
2019 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2020 }
2021
2022 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2023 addImmOperands(Inst, N);
2024 }
2025
2026 template<int Scale>
2027 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2028 assert(N == 1 && "Invalid number of operands!");
2029 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2030
2031 if (!MCE) {
2033 return;
2034 }
2035 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2036 }
2037
2038 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2039 assert(N == 1 && "Invalid number of operands!");
2040 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2042 }
2043
2044 template <int Scale>
2045 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2046 assert(N == 1 && "Invalid number of operands!");
2047 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2048 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2049 }
2050
2051 template <int Scale>
2052 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2053 assert(N == 1 && "Invalid number of operands!");
2054 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2055 }
2056
2057 template <typename T>
2058 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2059 assert(N == 1 && "Invalid number of operands!");
2060 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2061 std::make_unsigned_t<T> Val = MCE->getValue();
2062 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2063 Inst.addOperand(MCOperand::createImm(encoding));
2064 }
2065
2066 template <typename T>
2067 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2070 std::make_unsigned_t<T> Val = ~MCE->getValue();
2071 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2072 Inst.addOperand(MCOperand::createImm(encoding));
2073 }
2074
2075 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2076 assert(N == 1 && "Invalid number of operands!");
2077 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2078 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2079 Inst.addOperand(MCOperand::createImm(encoding));
2080 }
2081
2082 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2083 // Branch operands don't encode the low bits, so shift them off
2084 // here. If it's a label, however, just put it on directly as there's
2085 // not enough information now to do anything.
2086 assert(N == 1 && "Invalid number of operands!");
2087 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2088 if (!MCE) {
2089 addExpr(Inst, getImm());
2090 return;
2091 }
2092 assert(MCE && "Invalid constant immediate operand!");
2093 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2094 }
2095
2096 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2097 // PC-relative operands don't encode the low bits, so shift them off
2098 // here. If it's a label, however, just put it on directly as there's
2099 // not enough information now to do anything.
2100 assert(N == 1 && "Invalid number of operands!");
2101 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2102 if (!MCE) {
2103 addExpr(Inst, getImm());
2104 return;
2105 }
2106 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2107 }
2108
2109 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2110 // Branch operands don't encode the low bits, so shift them off
2111 // here. If it's a label, however, just put it on directly as there's
2112 // not enough information now to do anything.
2113 assert(N == 1 && "Invalid number of operands!");
2114 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2115 if (!MCE) {
2116 addExpr(Inst, getImm());
2117 return;
2118 }
2119 assert(MCE && "Invalid constant immediate operand!");
2120 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2121 }
2122
2123 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2124 // Branch operands don't encode the low bits, so shift them off
2125 // here. If it's a label, however, just put it on directly as there's
2126 // not enough information now to do anything.
2127 assert(N == 1 && "Invalid number of operands!");
2128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2129 if (!MCE) {
2130 addExpr(Inst, getImm());
2131 return;
2132 }
2133 assert(MCE && "Invalid constant immediate operand!");
2134 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2135 }
2136
2137 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2138 // Branch operands don't encode the low bits, so shift them off
2139 // here. If it's a label, however, just put it on directly as there's
2140 // not enough information now to do anything.
2141 assert(N == 1 && "Invalid number of operands!");
2142 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2143 if (!MCE) {
2144 addExpr(Inst, getImm());
2145 return;
2146 }
2147 assert(MCE && "Invalid constant immediate operand!");
2148 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2149 }
2150
2151 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2152 assert(N == 1 && "Invalid number of operands!");
2154 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2155 }
2156
2157 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2158 assert(N == 1 && "Invalid number of operands!");
2159 Inst.addOperand(MCOperand::createImm(getBarrier()));
2160 }
2161
2162 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2163 assert(N == 1 && "Invalid number of operands!");
2164 Inst.addOperand(MCOperand::createImm(getBarrier()));
2165 }
2166
2167 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2168 assert(N == 1 && "Invalid number of operands!");
2169
2170 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2171 }
2172
2173 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175
2176 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2177 }
2178
2179 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181
2182 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2183 }
2184
2185 void addSVCROperands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187
2188 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2189 }
2190
2191 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2192 assert(N == 1 && "Invalid number of operands!");
2193
2194 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2195 }
2196
2197 void addSysCROperands(MCInst &Inst, unsigned N) const {
2198 assert(N == 1 && "Invalid number of operands!");
2199 Inst.addOperand(MCOperand::createImm(getSysCR()));
2200 }
2201
2202 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2203 assert(N == 1 && "Invalid number of operands!");
2204 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2205 }
2206
2207 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2208 assert(N == 1 && "Invalid number of operands!");
2209 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2210 }
2211
2212 void addPHintOperands(MCInst &Inst, unsigned N) const {
2213 assert(N == 1 && "Invalid number of operands!");
2214 Inst.addOperand(MCOperand::createImm(getPHint()));
2215 }
2216
2217 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2218 assert(N == 1 && "Invalid number of operands!");
2219 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2220 }
2221
2222 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2223 assert(N == 1 && "Invalid number of operands!");
2224 Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
2225 }
2226
2227 void addShifterOperands(MCInst &Inst, unsigned N) const {
2228 assert(N == 1 && "Invalid number of operands!");
2229 unsigned Imm =
2230 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2232 }
2233
2234 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2235 assert(N == 1 && "Invalid number of operands!");
2236 unsigned Imm = getShiftExtendAmount();
2238 }
2239
2240 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242
2243 if (!isScalarReg())
2244 return;
2245
2246 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2247 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2249 if (Reg != AArch64::XZR)
2250 llvm_unreachable("wrong register");
2251
2252 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2253 }
2254
2255 void addExtendOperands(MCInst &Inst, unsigned N) const {
2256 assert(N == 1 && "Invalid number of operands!");
2257 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2258 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2259 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2261 }
2262
2263 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2264 assert(N == 1 && "Invalid number of operands!");
2265 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2266 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2267 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2269 }
2270
2271 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2272 assert(N == 2 && "Invalid number of operands!");
2273 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2274 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2275 Inst.addOperand(MCOperand::createImm(IsSigned));
2276 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2277 }
2278
2279 // For 8-bit load/store instructions with a register offset, both the
2280 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2281 // they're disambiguated by whether the shift was explicit or implicit rather
2282 // than its size.
2283 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2284 assert(N == 2 && "Invalid number of operands!");
2285 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2286 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2287 Inst.addOperand(MCOperand::createImm(IsSigned));
2288 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2289 }
2290
2291 template<int Shift>
2292 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2293 assert(N == 1 && "Invalid number of operands!");
2294
2295 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2296 if (CE) {
2297 uint64_t Value = CE->getValue();
2298 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2299 } else {
2300 addExpr(Inst, getImm());
2301 }
2302 }
2303
2304 template<int Shift>
2305 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2306 assert(N == 1 && "Invalid number of operands!");
2307
2308 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2309 uint64_t Value = CE->getValue();
2310 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2311 }
2312
2313 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2314 assert(N == 1 && "Invalid number of operands!");
2315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2316 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2317 }
2318
2319 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2320 assert(N == 1 && "Invalid number of operands!");
2321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2322 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2323 }
2324
2325 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2326
2327 static std::unique_ptr<AArch64Operand>
2328 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2329 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2330 Op->Tok.Data = Str.data();
2331 Op->Tok.Length = Str.size();
2332 Op->Tok.IsSuffix = IsSuffix;
2333 Op->StartLoc = S;
2334 Op->EndLoc = S;
2335 return Op;
2336 }
2337
2338 static std::unique_ptr<AArch64Operand>
2339 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2340 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2342 unsigned ShiftAmount = 0,
2343 unsigned HasExplicitAmount = false) {
2344 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2345 Op->Reg.RegNum = RegNum;
2346 Op->Reg.Kind = Kind;
2347 Op->Reg.ElementWidth = 0;
2348 Op->Reg.EqualityTy = EqTy;
2349 Op->Reg.ShiftExtend.Type = ExtTy;
2350 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2351 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2352 Op->StartLoc = S;
2353 Op->EndLoc = E;
2354 return Op;
2355 }
2356
2357 static std::unique_ptr<AArch64Operand>
2358 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2359 SMLoc S, SMLoc E, MCContext &Ctx,
2361 unsigned ShiftAmount = 0,
2362 unsigned HasExplicitAmount = false) {
2363 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2364 Kind == RegKind::SVEPredicateVector ||
2365 Kind == RegKind::SVEPredicateAsCounter) &&
2366 "Invalid vector kind");
2367 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2368 HasExplicitAmount);
2369 Op->Reg.ElementWidth = ElementWidth;
2370 return Op;
2371 }
2372
2373 static std::unique_ptr<AArch64Operand>
2374 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2375 unsigned NumElements, unsigned ElementWidth,
2376 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2377 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2378 Op->VectorList.RegNum = RegNum;
2379 Op->VectorList.Count = Count;
2380 Op->VectorList.Stride = Stride;
2381 Op->VectorList.NumElements = NumElements;
2382 Op->VectorList.ElementWidth = ElementWidth;
2383 Op->VectorList.RegisterKind = RegisterKind;
2384 Op->StartLoc = S;
2385 Op->EndLoc = E;
2386 return Op;
2387 }
2388
2389 static std::unique_ptr<AArch64Operand>
2390 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2391 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2392 Op->VectorIndex.Val = Idx;
2393 Op->StartLoc = S;
2394 Op->EndLoc = E;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2400 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2401 Op->MatrixTileList.RegMask = RegMask;
2402 Op->StartLoc = S;
2403 Op->EndLoc = E;
2404 return Op;
2405 }
2406
2407 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2408 const unsigned ElementWidth) {
2409 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2410 RegMap = {
2411 {{0, AArch64::ZAB0},
2412 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2413 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2414 {{8, AArch64::ZAB0},
2415 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2416 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2417 {{16, AArch64::ZAH0},
2418 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2419 {{16, AArch64::ZAH1},
2420 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2421 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2422 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2423 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2424 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2425 };
2426
2427 if (ElementWidth == 64)
2428 OutRegs.insert(Reg);
2429 else {
2430 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2431 assert(!Regs.empty() && "Invalid tile or element width!");
2432 OutRegs.insert_range(Regs);
2433 }
2434 }
2435
2436 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2437 SMLoc E, MCContext &Ctx) {
2438 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2439 Op->Imm.Val = Val;
2440 Op->StartLoc = S;
2441 Op->EndLoc = E;
2442 return Op;
2443 }
2444
2445 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2446 unsigned ShiftAmount,
2447 SMLoc S, SMLoc E,
2448 MCContext &Ctx) {
2449 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2450 Op->ShiftedImm .Val = Val;
2451 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2452 Op->StartLoc = S;
2453 Op->EndLoc = E;
2454 return Op;
2455 }
2456
2457 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2458 unsigned Last, SMLoc S,
2459 SMLoc E,
2460 MCContext &Ctx) {
2461 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2462 Op->ImmRange.First = First;
2463 Op->ImmRange.Last = Last;
2464 Op->EndLoc = E;
2465 return Op;
2466 }
2467
2468 static std::unique_ptr<AArch64Operand>
2469 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2470 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2471 Op->CondCode.Code = Code;
2472 Op->StartLoc = S;
2473 Op->EndLoc = E;
2474 return Op;
2475 }
2476
2477 static std::unique_ptr<AArch64Operand>
2478 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2479 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2480 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2481 Op->FPImm.IsExact = IsExact;
2482 Op->StartLoc = S;
2483 Op->EndLoc = S;
2484 return Op;
2485 }
2486
2487 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2488 StringRef Str,
2489 SMLoc S,
2490 MCContext &Ctx,
2491 bool HasnXSModifier) {
2492 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2493 Op->Barrier.Val = Val;
2494 Op->Barrier.Data = Str.data();
2495 Op->Barrier.Length = Str.size();
2496 Op->Barrier.HasnXSModifier = HasnXSModifier;
2497 Op->StartLoc = S;
2498 Op->EndLoc = S;
2499 return Op;
2500 }
2501
2502 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2503 uint32_t MRSReg,
2504 uint32_t MSRReg,
2505 uint32_t PStateField,
2506 MCContext &Ctx) {
2507 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2508 Op->SysReg.Data = Str.data();
2509 Op->SysReg.Length = Str.size();
2510 Op->SysReg.MRSReg = MRSReg;
2511 Op->SysReg.MSRReg = MSRReg;
2512 Op->SysReg.PStateField = PStateField;
2513 Op->StartLoc = S;
2514 Op->EndLoc = S;
2515 return Op;
2516 }
2517
2518 static std::unique_ptr<AArch64Operand>
2519 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2520 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2521 Op->PHint.Val = Val;
2522 Op->PHint.Data = Str.data();
2523 Op->PHint.Length = Str.size();
2524 Op->StartLoc = S;
2525 Op->EndLoc = S;
2526 return Op;
2527 }
2528
2529 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2530 SMLoc E, MCContext &Ctx) {
2531 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2532 Op->SysCRImm.Val = Val;
2533 Op->StartLoc = S;
2534 Op->EndLoc = E;
2535 return Op;
2536 }
2537
2538 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2539 StringRef Str,
2540 SMLoc S,
2541 MCContext &Ctx) {
2542 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2543 Op->Prefetch.Val = Val;
2544 Op->Barrier.Data = Str.data();
2545 Op->Barrier.Length = Str.size();
2546 Op->StartLoc = S;
2547 Op->EndLoc = S;
2548 return Op;
2549 }
2550
2551 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2552 StringRef Str,
2553 SMLoc S,
2554 MCContext &Ctx) {
2555 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2556 Op->PSBHint.Val = Val;
2557 Op->PSBHint.Data = Str.data();
2558 Op->PSBHint.Length = Str.size();
2559 Op->StartLoc = S;
2560 Op->EndLoc = S;
2561 return Op;
2562 }
2563
2564 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2565 StringRef Str,
2566 SMLoc S,
2567 MCContext &Ctx) {
2568 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2569 Op->BTIHint.Val = Val | 32;
2570 Op->BTIHint.Data = Str.data();
2571 Op->BTIHint.Length = Str.size();
2572 Op->StartLoc = S;
2573 Op->EndLoc = S;
2574 return Op;
2575 }
2576
2577 static std::unique_ptr<AArch64Operand>
2578 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2579 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2580 Op->CMHPriorityHint.Val = Val;
2581 Op->CMHPriorityHint.Data = Str.data();
2582 Op->CMHPriorityHint.Length = Str.size();
2583 Op->StartLoc = S;
2584 Op->EndLoc = S;
2585 return Op;
2586 }
2587
2588 static std::unique_ptr<AArch64Operand>
2589 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2590 SMLoc S, SMLoc E, MCContext &Ctx) {
2591 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2592 Op->MatrixReg.RegNum = RegNum;
2593 Op->MatrixReg.ElementWidth = ElementWidth;
2594 Op->MatrixReg.Kind = Kind;
2595 Op->StartLoc = S;
2596 Op->EndLoc = E;
2597 return Op;
2598 }
2599
2600 static std::unique_ptr<AArch64Operand>
2601 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2602 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2603 Op->SVCR.PStateField = PStateField;
2604 Op->SVCR.Data = Str.data();
2605 Op->SVCR.Length = Str.size();
2606 Op->StartLoc = S;
2607 Op->EndLoc = S;
2608 return Op;
2609 }
2610
2611 static std::unique_ptr<AArch64Operand>
2612 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2613 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2614 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2615 Op->ShiftExtend.Type = ShOp;
2616 Op->ShiftExtend.Amount = Val;
2617 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2618 Op->StartLoc = S;
2619 Op->EndLoc = E;
2620 return Op;
2621 }
2622};
2623
2624} // end anonymous namespace.
2625
2626void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
2627 switch (Kind) {
2628 case k_FPImm:
2629 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2630 if (!getFPImmIsExact())
2631 OS << " (inexact)";
2632 OS << ">";
2633 break;
2634 case k_Barrier: {
2635 StringRef Name = getBarrierName();
2636 if (!Name.empty())
2637 OS << "<barrier " << Name << ">";
2638 else
2639 OS << "<barrier invalid #" << getBarrier() << ">";
2640 break;
2641 }
2642 case k_Immediate:
2643 MAI.printExpr(OS, *getImm());
2644 break;
2645 case k_ShiftedImm: {
2646 unsigned Shift = getShiftedImmShift();
2647 OS << "<shiftedimm ";
2648 MAI.printExpr(OS, *getShiftedImmVal());
2649 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2650 break;
2651 }
2652 case k_ImmRange: {
2653 OS << "<immrange ";
2654 OS << getFirstImmVal();
2655 OS << ":" << getLastImmVal() << ">";
2656 break;
2657 }
2658 case k_CondCode:
2659 OS << "<condcode " << getCondCode() << ">";
2660 break;
2661 case k_VectorList: {
2662 OS << "<vectorlist ";
2663 unsigned Reg = getVectorListStart();
2664 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2665 OS << Reg + i * getVectorListStride() << " ";
2666 OS << ">";
2667 break;
2668 }
2669 case k_VectorIndex:
2670 OS << "<vectorindex " << getVectorIndex() << ">";
2671 break;
2672 case k_SysReg:
2673 OS << "<sysreg: " << getSysReg() << '>';
2674 break;
2675 case k_Token:
2676 OS << "'" << getToken() << "'";
2677 break;
2678 case k_SysCR:
2679 OS << "c" << getSysCR();
2680 break;
2681 case k_Prefetch: {
2682 StringRef Name = getPrefetchName();
2683 if (!Name.empty())
2684 OS << "<prfop " << Name << ">";
2685 else
2686 OS << "<prfop invalid #" << getPrefetch() << ">";
2687 break;
2688 }
2689 case k_PSBHint:
2690 OS << getPSBHintName();
2691 break;
2692 case k_PHint:
2693 OS << getPHintName();
2694 break;
2695 case k_BTIHint:
2696 OS << getBTIHintName();
2697 break;
2698 case k_CMHPriorityHint:
2699 OS << getCMHPriorityHintName();
2700 break;
2701 case k_MatrixRegister:
2702 OS << "<matrix " << getMatrixReg() << ">";
2703 break;
2704 case k_MatrixTileList: {
2705 OS << "<matrixlist ";
2706 unsigned RegMask = getMatrixTileListRegMask();
2707 unsigned MaxBits = 8;
2708 for (unsigned I = MaxBits; I > 0; --I)
2709 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2710 OS << '>';
2711 break;
2712 }
2713 case k_SVCR: {
2714 OS << getSVCR();
2715 break;
2716 }
2717 case k_Register:
2718 OS << "<register " << getReg() << ">";
2719 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2720 break;
2721 [[fallthrough]];
2722 case k_ShiftExtend:
2723 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2724 << getShiftExtendAmount();
2725 if (!hasShiftExtendAmount())
2726 OS << "<imp>";
2727 OS << '>';
2728 break;
2729 }
2730}
2731
2732/// @name Auto-generated Match Functions
2733/// {
2734
2736
2737/// }
2738
2739static unsigned MatchNeonVectorRegName(StringRef Name) {
2740 return StringSwitch<unsigned>(Name.lower())
2741 .Case("v0", AArch64::Q0)
2742 .Case("v1", AArch64::Q1)
2743 .Case("v2", AArch64::Q2)
2744 .Case("v3", AArch64::Q3)
2745 .Case("v4", AArch64::Q4)
2746 .Case("v5", AArch64::Q5)
2747 .Case("v6", AArch64::Q6)
2748 .Case("v7", AArch64::Q7)
2749 .Case("v8", AArch64::Q8)
2750 .Case("v9", AArch64::Q9)
2751 .Case("v10", AArch64::Q10)
2752 .Case("v11", AArch64::Q11)
2753 .Case("v12", AArch64::Q12)
2754 .Case("v13", AArch64::Q13)
2755 .Case("v14", AArch64::Q14)
2756 .Case("v15", AArch64::Q15)
2757 .Case("v16", AArch64::Q16)
2758 .Case("v17", AArch64::Q17)
2759 .Case("v18", AArch64::Q18)
2760 .Case("v19", AArch64::Q19)
2761 .Case("v20", AArch64::Q20)
2762 .Case("v21", AArch64::Q21)
2763 .Case("v22", AArch64::Q22)
2764 .Case("v23", AArch64::Q23)
2765 .Case("v24", AArch64::Q24)
2766 .Case("v25", AArch64::Q25)
2767 .Case("v26", AArch64::Q26)
2768 .Case("v27", AArch64::Q27)
2769 .Case("v28", AArch64::Q28)
2770 .Case("v29", AArch64::Q29)
2771 .Case("v30", AArch64::Q30)
2772 .Case("v31", AArch64::Q31)
2773 .Default(0);
2774}
2775
2776/// Returns an optional pair of (#elements, element-width) if Suffix
2777/// is a valid vector kind. Where the number of elements in a vector
2778/// or the vector width is implicit or explicitly unknown (but still a
2779/// valid suffix kind), 0 is used.
2780static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2781 RegKind VectorKind) {
2782 std::pair<int, int> Res = {-1, -1};
2783
2784 switch (VectorKind) {
2785 case RegKind::NeonVector:
2787 .Case("", {0, 0})
2788 .Case(".1d", {1, 64})
2789 .Case(".1q", {1, 128})
2790 // '.2h' needed for fp16 scalar pairwise reductions
2791 .Case(".2h", {2, 16})
2792 .Case(".2b", {2, 8})
2793 .Case(".2s", {2, 32})
2794 .Case(".2d", {2, 64})
2795 // '.4b' is another special case for the ARMv8.2a dot product
2796 // operand
2797 .Case(".4b", {4, 8})
2798 .Case(".4h", {4, 16})
2799 .Case(".4s", {4, 32})
2800 .Case(".8b", {8, 8})
2801 .Case(".8h", {8, 16})
2802 .Case(".16b", {16, 8})
2803 // Accept the width neutral ones, too, for verbose syntax. If
2804 // those aren't used in the right places, the token operand won't
2805 // match so all will work out.
2806 .Case(".b", {0, 8})
2807 .Case(".h", {0, 16})
2808 .Case(".s", {0, 32})
2809 .Case(".d", {0, 64})
2810 .Default({-1, -1});
2811 break;
2812 case RegKind::SVEPredicateAsCounter:
2813 case RegKind::SVEPredicateVector:
2814 case RegKind::SVEDataVector:
2815 case RegKind::Matrix:
2817 .Case("", {0, 0})
2818 .Case(".b", {0, 8})
2819 .Case(".h", {0, 16})
2820 .Case(".s", {0, 32})
2821 .Case(".d", {0, 64})
2822 .Case(".q", {0, 128})
2823 .Default({-1, -1});
2824 break;
2825 default:
2826 llvm_unreachable("Unsupported RegKind");
2827 }
2828
2829 if (Res == std::make_pair(-1, -1))
2830 return std::nullopt;
2831
2832 return std::optional<std::pair<int, int>>(Res);
2833}
2834
2835static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2836 return parseVectorKind(Suffix, VectorKind).has_value();
2837}
2838
2840 return StringSwitch<unsigned>(Name.lower())
2841 .Case("z0", AArch64::Z0)
2842 .Case("z1", AArch64::Z1)
2843 .Case("z2", AArch64::Z2)
2844 .Case("z3", AArch64::Z3)
2845 .Case("z4", AArch64::Z4)
2846 .Case("z5", AArch64::Z5)
2847 .Case("z6", AArch64::Z6)
2848 .Case("z7", AArch64::Z7)
2849 .Case("z8", AArch64::Z8)
2850 .Case("z9", AArch64::Z9)
2851 .Case("z10", AArch64::Z10)
2852 .Case("z11", AArch64::Z11)
2853 .Case("z12", AArch64::Z12)
2854 .Case("z13", AArch64::Z13)
2855 .Case("z14", AArch64::Z14)
2856 .Case("z15", AArch64::Z15)
2857 .Case("z16", AArch64::Z16)
2858 .Case("z17", AArch64::Z17)
2859 .Case("z18", AArch64::Z18)
2860 .Case("z19", AArch64::Z19)
2861 .Case("z20", AArch64::Z20)
2862 .Case("z21", AArch64::Z21)
2863 .Case("z22", AArch64::Z22)
2864 .Case("z23", AArch64::Z23)
2865 .Case("z24", AArch64::Z24)
2866 .Case("z25", AArch64::Z25)
2867 .Case("z26", AArch64::Z26)
2868 .Case("z27", AArch64::Z27)
2869 .Case("z28", AArch64::Z28)
2870 .Case("z29", AArch64::Z29)
2871 .Case("z30", AArch64::Z30)
2872 .Case("z31", AArch64::Z31)
2873 .Default(0);
2874}
2875
2877 return StringSwitch<unsigned>(Name.lower())
2878 .Case("p0", AArch64::P0)
2879 .Case("p1", AArch64::P1)
2880 .Case("p2", AArch64::P2)
2881 .Case("p3", AArch64::P3)
2882 .Case("p4", AArch64::P4)
2883 .Case("p5", AArch64::P5)
2884 .Case("p6", AArch64::P6)
2885 .Case("p7", AArch64::P7)
2886 .Case("p8", AArch64::P8)
2887 .Case("p9", AArch64::P9)
2888 .Case("p10", AArch64::P10)
2889 .Case("p11", AArch64::P11)
2890 .Case("p12", AArch64::P12)
2891 .Case("p13", AArch64::P13)
2892 .Case("p14", AArch64::P14)
2893 .Case("p15", AArch64::P15)
2894 .Default(0);
2895}
2896
2898 return StringSwitch<unsigned>(Name.lower())
2899 .Case("pn0", AArch64::PN0)
2900 .Case("pn1", AArch64::PN1)
2901 .Case("pn2", AArch64::PN2)
2902 .Case("pn3", AArch64::PN3)
2903 .Case("pn4", AArch64::PN4)
2904 .Case("pn5", AArch64::PN5)
2905 .Case("pn6", AArch64::PN6)
2906 .Case("pn7", AArch64::PN7)
2907 .Case("pn8", AArch64::PN8)
2908 .Case("pn9", AArch64::PN9)
2909 .Case("pn10", AArch64::PN10)
2910 .Case("pn11", AArch64::PN11)
2911 .Case("pn12", AArch64::PN12)
2912 .Case("pn13", AArch64::PN13)
2913 .Case("pn14", AArch64::PN14)
2914 .Case("pn15", AArch64::PN15)
2915 .Default(0);
2916}
2917
2919 return StringSwitch<unsigned>(Name.lower())
2920 .Case("za0.d", AArch64::ZAD0)
2921 .Case("za1.d", AArch64::ZAD1)
2922 .Case("za2.d", AArch64::ZAD2)
2923 .Case("za3.d", AArch64::ZAD3)
2924 .Case("za4.d", AArch64::ZAD4)
2925 .Case("za5.d", AArch64::ZAD5)
2926 .Case("za6.d", AArch64::ZAD6)
2927 .Case("za7.d", AArch64::ZAD7)
2928 .Case("za0.s", AArch64::ZAS0)
2929 .Case("za1.s", AArch64::ZAS1)
2930 .Case("za2.s", AArch64::ZAS2)
2931 .Case("za3.s", AArch64::ZAS3)
2932 .Case("za0.h", AArch64::ZAH0)
2933 .Case("za1.h", AArch64::ZAH1)
2934 .Case("za0.b", AArch64::ZAB0)
2935 .Default(0);
2936}
2937
2938static unsigned matchMatrixRegName(StringRef Name) {
2939 return StringSwitch<unsigned>(Name.lower())
2940 .Case("za", AArch64::ZA)
2941 .Case("za0.q", AArch64::ZAQ0)
2942 .Case("za1.q", AArch64::ZAQ1)
2943 .Case("za2.q", AArch64::ZAQ2)
2944 .Case("za3.q", AArch64::ZAQ3)
2945 .Case("za4.q", AArch64::ZAQ4)
2946 .Case("za5.q", AArch64::ZAQ5)
2947 .Case("za6.q", AArch64::ZAQ6)
2948 .Case("za7.q", AArch64::ZAQ7)
2949 .Case("za8.q", AArch64::ZAQ8)
2950 .Case("za9.q", AArch64::ZAQ9)
2951 .Case("za10.q", AArch64::ZAQ10)
2952 .Case("za11.q", AArch64::ZAQ11)
2953 .Case("za12.q", AArch64::ZAQ12)
2954 .Case("za13.q", AArch64::ZAQ13)
2955 .Case("za14.q", AArch64::ZAQ14)
2956 .Case("za15.q", AArch64::ZAQ15)
2957 .Case("za0.d", AArch64::ZAD0)
2958 .Case("za1.d", AArch64::ZAD1)
2959 .Case("za2.d", AArch64::ZAD2)
2960 .Case("za3.d", AArch64::ZAD3)
2961 .Case("za4.d", AArch64::ZAD4)
2962 .Case("za5.d", AArch64::ZAD5)
2963 .Case("za6.d", AArch64::ZAD6)
2964 .Case("za7.d", AArch64::ZAD7)
2965 .Case("za0.s", AArch64::ZAS0)
2966 .Case("za1.s", AArch64::ZAS1)
2967 .Case("za2.s", AArch64::ZAS2)
2968 .Case("za3.s", AArch64::ZAS3)
2969 .Case("za0.h", AArch64::ZAH0)
2970 .Case("za1.h", AArch64::ZAH1)
2971 .Case("za0.b", AArch64::ZAB0)
2972 .Case("za0h.q", AArch64::ZAQ0)
2973 .Case("za1h.q", AArch64::ZAQ1)
2974 .Case("za2h.q", AArch64::ZAQ2)
2975 .Case("za3h.q", AArch64::ZAQ3)
2976 .Case("za4h.q", AArch64::ZAQ4)
2977 .Case("za5h.q", AArch64::ZAQ5)
2978 .Case("za6h.q", AArch64::ZAQ6)
2979 .Case("za7h.q", AArch64::ZAQ7)
2980 .Case("za8h.q", AArch64::ZAQ8)
2981 .Case("za9h.q", AArch64::ZAQ9)
2982 .Case("za10h.q", AArch64::ZAQ10)
2983 .Case("za11h.q", AArch64::ZAQ11)
2984 .Case("za12h.q", AArch64::ZAQ12)
2985 .Case("za13h.q", AArch64::ZAQ13)
2986 .Case("za14h.q", AArch64::ZAQ14)
2987 .Case("za15h.q", AArch64::ZAQ15)
2988 .Case("za0h.d", AArch64::ZAD0)
2989 .Case("za1h.d", AArch64::ZAD1)
2990 .Case("za2h.d", AArch64::ZAD2)
2991 .Case("za3h.d", AArch64::ZAD3)
2992 .Case("za4h.d", AArch64::ZAD4)
2993 .Case("za5h.d", AArch64::ZAD5)
2994 .Case("za6h.d", AArch64::ZAD6)
2995 .Case("za7h.d", AArch64::ZAD7)
2996 .Case("za0h.s", AArch64::ZAS0)
2997 .Case("za1h.s", AArch64::ZAS1)
2998 .Case("za2h.s", AArch64::ZAS2)
2999 .Case("za3h.s", AArch64::ZAS3)
3000 .Case("za0h.h", AArch64::ZAH0)
3001 .Case("za1h.h", AArch64::ZAH1)
3002 .Case("za0h.b", AArch64::ZAB0)
3003 .Case("za0v.q", AArch64::ZAQ0)
3004 .Case("za1v.q", AArch64::ZAQ1)
3005 .Case("za2v.q", AArch64::ZAQ2)
3006 .Case("za3v.q", AArch64::ZAQ3)
3007 .Case("za4v.q", AArch64::ZAQ4)
3008 .Case("za5v.q", AArch64::ZAQ5)
3009 .Case("za6v.q", AArch64::ZAQ6)
3010 .Case("za7v.q", AArch64::ZAQ7)
3011 .Case("za8v.q", AArch64::ZAQ8)
3012 .Case("za9v.q", AArch64::ZAQ9)
3013 .Case("za10v.q", AArch64::ZAQ10)
3014 .Case("za11v.q", AArch64::ZAQ11)
3015 .Case("za12v.q", AArch64::ZAQ12)
3016 .Case("za13v.q", AArch64::ZAQ13)
3017 .Case("za14v.q", AArch64::ZAQ14)
3018 .Case("za15v.q", AArch64::ZAQ15)
3019 .Case("za0v.d", AArch64::ZAD0)
3020 .Case("za1v.d", AArch64::ZAD1)
3021 .Case("za2v.d", AArch64::ZAD2)
3022 .Case("za3v.d", AArch64::ZAD3)
3023 .Case("za4v.d", AArch64::ZAD4)
3024 .Case("za5v.d", AArch64::ZAD5)
3025 .Case("za6v.d", AArch64::ZAD6)
3026 .Case("za7v.d", AArch64::ZAD7)
3027 .Case("za0v.s", AArch64::ZAS0)
3028 .Case("za1v.s", AArch64::ZAS1)
3029 .Case("za2v.s", AArch64::ZAS2)
3030 .Case("za3v.s", AArch64::ZAS3)
3031 .Case("za0v.h", AArch64::ZAH0)
3032 .Case("za1v.h", AArch64::ZAH1)
3033 .Case("za0v.b", AArch64::ZAB0)
3034 .Default(0);
3035}
3036
3037bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3038 SMLoc &EndLoc) {
3039 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3040}
3041
3042ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3043 SMLoc &EndLoc) {
3044 StartLoc = getLoc();
3045 ParseStatus Res = tryParseScalarRegister(Reg);
3046 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3047 return Res;
3048}
3049
3050// Matches a register name or register alias previously defined by '.req'
3051unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3052 RegKind Kind) {
3053 unsigned RegNum = 0;
3054 if ((RegNum = matchSVEDataVectorRegName(Name)))
3055 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3056
3057 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3058 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3059
3060 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3061 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3062
3063 if ((RegNum = MatchNeonVectorRegName(Name)))
3064 return Kind == RegKind::NeonVector ? RegNum : 0;
3065
3066 if ((RegNum = matchMatrixRegName(Name)))
3067 return Kind == RegKind::Matrix ? RegNum : 0;
3068
3069 if (Name.equals_insensitive("zt0"))
3070 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3071
3072 // The parsed register must be of RegKind Scalar
3073 if ((RegNum = MatchRegisterName(Name)))
3074 return (Kind == RegKind::Scalar) ? RegNum : 0;
3075
3076 if (!RegNum) {
3077 // Handle a few common aliases of registers.
3078 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3079 .Case("fp", AArch64::FP)
3080 .Case("lr", AArch64::LR)
3081 .Case("x31", AArch64::XZR)
3082 .Case("w31", AArch64::WZR)
3083 .Default(0))
3084 return Kind == RegKind::Scalar ? RegNum : 0;
3085
3086 // Check for aliases registered via .req. Canonicalize to lower case.
3087 // That's more consistent since register names are case insensitive, and
3088 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3089 auto Entry = RegisterReqs.find(Name.lower());
3090 if (Entry == RegisterReqs.end())
3091 return 0;
3092
3093 // set RegNum if the match is the right kind of register
3094 if (Kind == Entry->getValue().first)
3095 RegNum = Entry->getValue().second;
3096 }
3097 return RegNum;
3098}
3099
3100unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3101 switch (K) {
3102 case RegKind::Scalar:
3103 case RegKind::NeonVector:
3104 case RegKind::SVEDataVector:
3105 return 32;
3106 case RegKind::Matrix:
3107 case RegKind::SVEPredicateVector:
3108 case RegKind::SVEPredicateAsCounter:
3109 return 16;
3110 case RegKind::LookupTable:
3111 return 1;
3112 }
3113 llvm_unreachable("Unsupported RegKind");
3114}
3115
3116/// tryParseScalarRegister - Try to parse a register name. The token must be an
3117/// Identifier when called, and if it is a register name the token is eaten and
3118/// the register is added to the operand list.
3119ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3120 const AsmToken &Tok = getTok();
3121 if (Tok.isNot(AsmToken::Identifier))
3122 return ParseStatus::NoMatch;
3123
3124 std::string lowerCase = Tok.getString().lower();
3125 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3126 if (Reg == 0)
3127 return ParseStatus::NoMatch;
3128
3129 RegNum = Reg;
3130 Lex(); // Eat identifier token.
3131 return ParseStatus::Success;
3132}
3133
3134/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3135ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3136 SMLoc S = getLoc();
3137
3138 if (getTok().isNot(AsmToken::Identifier))
3139 return Error(S, "Expected cN operand where 0 <= N <= 15");
3140
3141 StringRef Tok = getTok().getIdentifier();
3142 if (Tok[0] != 'c' && Tok[0] != 'C')
3143 return Error(S, "Expected cN operand where 0 <= N <= 15");
3144
3145 uint32_t CRNum;
3146 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3147 if (BadNum || CRNum > 15)
3148 return Error(S, "Expected cN operand where 0 <= N <= 15");
3149
3150 Lex(); // Eat identifier token.
3151 Operands.push_back(
3152 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3153 return ParseStatus::Success;
3154}
3155
3156// Either an identifier for named values or a 6-bit immediate.
3157ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3158 SMLoc S = getLoc();
3159 const AsmToken &Tok = getTok();
3160
3161 unsigned MaxVal = 63;
3162
3163 // Immediate case, with optional leading hash:
3164 if (parseOptionalToken(AsmToken::Hash) ||
3165 Tok.is(AsmToken::Integer)) {
3166 const MCExpr *ImmVal;
3167 if (getParser().parseExpression(ImmVal))
3168 return ParseStatus::Failure;
3169
3170 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3171 if (!MCE)
3172 return TokError("immediate value expected for prefetch operand");
3173 unsigned prfop = MCE->getValue();
3174 if (prfop > MaxVal)
3175 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3176 "] expected");
3177
3178 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3179 Operands.push_back(AArch64Operand::CreatePrefetch(
3180 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3181 return ParseStatus::Success;
3182 }
3183
3184 if (Tok.isNot(AsmToken::Identifier))
3185 return TokError("prefetch hint expected");
3186
3187 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3188 if (!RPRFM)
3189 return TokError("prefetch hint expected");
3190
3191 Operands.push_back(AArch64Operand::CreatePrefetch(
3192 RPRFM->Encoding, Tok.getString(), S, getContext()));
3193 Lex(); // Eat identifier token.
3194 return ParseStatus::Success;
3195}
3196
3197/// tryParsePrefetch - Try to parse a prefetch operand.
3198template <bool IsSVEPrefetch>
3199ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3200 SMLoc S = getLoc();
3201 const AsmToken &Tok = getTok();
3202
3203 auto LookupByName = [](StringRef N) {
3204 if (IsSVEPrefetch) {
3205 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3206 return std::optional<unsigned>(Res->Encoding);
3207 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3208 return std::optional<unsigned>(Res->Encoding);
3209 return std::optional<unsigned>();
3210 };
3211
3212 auto LookupByEncoding = [](unsigned E) {
3213 if (IsSVEPrefetch) {
3214 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3215 return std::optional<StringRef>(Res->Name);
3216 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3217 return std::optional<StringRef>(Res->Name);
3218 return std::optional<StringRef>();
3219 };
3220 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3221
3222 // Either an identifier for named values or a 5-bit immediate.
3223 // Eat optional hash.
3224 if (parseOptionalToken(AsmToken::Hash) ||
3225 Tok.is(AsmToken::Integer)) {
3226 const MCExpr *ImmVal;
3227 if (getParser().parseExpression(ImmVal))
3228 return ParseStatus::Failure;
3229
3230 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3231 if (!MCE)
3232 return TokError("immediate value expected for prefetch operand");
3233 unsigned prfop = MCE->getValue();
3234 if (prfop > MaxVal)
3235 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3236 "] expected");
3237
3238 auto PRFM = LookupByEncoding(MCE->getValue());
3239 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3240 S, getContext()));
3241 return ParseStatus::Success;
3242 }
3243
3244 if (Tok.isNot(AsmToken::Identifier))
3245 return TokError("prefetch hint expected");
3246
3247 auto PRFM = LookupByName(Tok.getString());
3248 if (!PRFM)
3249 return TokError("prefetch hint expected");
3250
3251 Operands.push_back(AArch64Operand::CreatePrefetch(
3252 *PRFM, Tok.getString(), S, getContext()));
3253 Lex(); // Eat identifier token.
3254 return ParseStatus::Success;
3255}
3256
3257/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3258ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3259 SMLoc S = getLoc();
3260 const AsmToken &Tok = getTok();
3261 if (Tok.isNot(AsmToken::Identifier))
3262 return TokError("invalid operand for instruction");
3263
3264 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3265 if (!PSB)
3266 return TokError("invalid operand for instruction");
3267
3268 Operands.push_back(AArch64Operand::CreatePSBHint(
3269 PSB->Encoding, Tok.getString(), S, getContext()));
3270 Lex(); // Eat identifier token.
3271 return ParseStatus::Success;
3272}
3273
3274ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3275 SMLoc StartLoc = getLoc();
3276
3277 MCRegister RegNum;
3278
3279 // The case where xzr, xzr is not present is handled by an InstAlias.
3280
3281 auto RegTok = getTok(); // in case we need to backtrack
3282 if (!tryParseScalarRegister(RegNum).isSuccess())
3283 return ParseStatus::NoMatch;
3284
3285 if (RegNum != AArch64::XZR) {
3286 getLexer().UnLex(RegTok);
3287 return ParseStatus::NoMatch;
3288 }
3289
3290 if (parseComma())
3291 return ParseStatus::Failure;
3292
3293 if (!tryParseScalarRegister(RegNum).isSuccess())
3294 return TokError("expected register operand");
3295
3296 if (RegNum != AArch64::XZR)
3297 return TokError("xzr must be followed by xzr");
3298
3299 // We need to push something, since we claim this is an operand in .td.
3300 // See also AArch64AsmParser::parseKeywordOperand.
3301 Operands.push_back(AArch64Operand::CreateReg(
3302 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3303
3304 return ParseStatus::Success;
3305}
3306
3307/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3308ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3309 SMLoc S = getLoc();
3310 const AsmToken &Tok = getTok();
3311 if (Tok.isNot(AsmToken::Identifier))
3312 return TokError("invalid operand for instruction");
3313
3314 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3315 if (!BTI)
3316 return TokError("invalid operand for instruction");
3317
3318 Operands.push_back(AArch64Operand::CreateBTIHint(
3319 BTI->Encoding, Tok.getString(), S, getContext()));
3320 Lex(); // Eat identifier token.
3321 return ParseStatus::Success;
3322}
3323
3324/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3325ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3326 SMLoc S = getLoc();
3327 const AsmToken &Tok = getTok();
3328 if (Tok.isNot(AsmToken::Identifier))
3329 return TokError("invalid operand for instruction");
3330
3331 auto CMHPriority =
3332 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3333 if (!CMHPriority)
3334 return TokError("invalid operand for instruction");
3335
3336 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3337 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3338 Lex(); // Eat identifier token.
3339 return ParseStatus::Success;
3340}
3341
3342/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3343/// instruction.
3344ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3345 SMLoc S = getLoc();
3346 const MCExpr *Expr = nullptr;
3347
3348 if (getTok().is(AsmToken::Hash)) {
3349 Lex(); // Eat hash token.
3350 }
3351
3352 if (parseSymbolicImmVal(Expr))
3353 return ParseStatus::Failure;
3354
3355 AArch64::Specifier ELFSpec;
3356 AArch64::Specifier DarwinSpec;
3357 int64_t Addend;
3358 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3359 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3360 // No modifier was specified at all; this is the syntax for an ELF basic
3361 // ADRP relocation (unfortunately).
3362 Expr =
3364 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3365 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3366 Addend != 0) {
3367 return Error(S, "gotpage label reference not allowed an addend");
3368 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3369 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3370 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3371 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3372 ELFSpec != AArch64::S_GOT_PAGE &&
3373 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3374 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3375 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3376 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3377 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3378 // The operand must be an @page or @gotpage qualified symbolref.
3379 return Error(S, "page or gotpage label reference expected");
3380 }
3381 }
3382
3383 // We have either a label reference possibly with addend or an immediate. The
3384 // addend is a raw value here. The linker will adjust it to only reference the
3385 // page.
3386 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3387 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3388
3389 return ParseStatus::Success;
3390}
3391
3392/// tryParseAdrLabel - Parse and validate a source label for the ADR
3393/// instruction.
3394ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3395 SMLoc S = getLoc();
3396 const MCExpr *Expr = nullptr;
3397
3398 // Leave anything with a bracket to the default for SVE
3399 if (getTok().is(AsmToken::LBrac))
3400 return ParseStatus::NoMatch;
3401
3402 if (getTok().is(AsmToken::Hash))
3403 Lex(); // Eat hash token.
3404
3405 if (parseSymbolicImmVal(Expr))
3406 return ParseStatus::Failure;
3407
3408 AArch64::Specifier ELFSpec;
3409 AArch64::Specifier DarwinSpec;
3410 int64_t Addend;
3411 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3412 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3413 // No modifier was specified at all; this is the syntax for an ELF basic
3414 // ADR relocation (unfortunately).
3416 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3417 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3418 // adr. It's not actually GOT entry page address but the GOT address
3419 // itself - we just share the same variant kind with :got_auth: operator
3420 // applied for adrp.
3421 // TODO: can we somehow get current TargetMachine object to call
3422 // getCodeModel() on it to ensure we are using tiny code model?
3423 return Error(S, "unexpected adr label");
3424 }
3425 }
3426
3427 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3428 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3429 return ParseStatus::Success;
3430}
3431
3432/// tryParseFPImm - A floating point immediate expression operand.
3433template <bool AddFPZeroAsLiteral>
3434ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3435 SMLoc S = getLoc();
3436
3437 bool Hash = parseOptionalToken(AsmToken::Hash);
3438
3439 // Handle negation, as that still comes through as a separate token.
3440 bool isNegative = parseOptionalToken(AsmToken::Minus);
3441
3442 const AsmToken &Tok = getTok();
3443 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3444 if (!Hash)
3445 return ParseStatus::NoMatch;
3446 return TokError("invalid floating point immediate");
3447 }
3448
3449 // Parse hexadecimal representation.
3450 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3451 if (Tok.getIntVal() > 255 || isNegative)
3452 return TokError("encoded floating point value out of range");
3453
3455 Operands.push_back(
3456 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3457 } else {
3458 // Parse FP representation.
3459 APFloat RealVal(APFloat::IEEEdouble());
3460 auto StatusOrErr =
3461 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3462 if (errorToBool(StatusOrErr.takeError()))
3463 return TokError("invalid floating point representation");
3464
3465 if (isNegative)
3466 RealVal.changeSign();
3467
3468 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3469 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3470 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3471 } else
3472 Operands.push_back(AArch64Operand::CreateFPImm(
3473 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3474 }
3475
3476 Lex(); // Eat the token.
3477
3478 return ParseStatus::Success;
3479}
3480
3481/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3482/// a shift suffix, for example '#1, lsl #12'.
3483ParseStatus
3484AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3485 SMLoc S = getLoc();
3486
3487 if (getTok().is(AsmToken::Hash))
3488 Lex(); // Eat '#'
3489 else if (getTok().isNot(AsmToken::Integer))
3490 // Operand should start from # or should be integer, emit error otherwise.
3491 return ParseStatus::NoMatch;
3492
3493 if (getTok().is(AsmToken::Integer) &&
3494 getLexer().peekTok().is(AsmToken::Colon))
3495 return tryParseImmRange(Operands);
3496
3497 const MCExpr *Imm = nullptr;
3498 if (parseSymbolicImmVal(Imm))
3499 return ParseStatus::Failure;
3500 else if (getTok().isNot(AsmToken::Comma)) {
3501 Operands.push_back(
3502 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3503 return ParseStatus::Success;
3504 }
3505
3506 // Eat ','
3507 Lex();
3508 StringRef VecGroup;
3509 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3510 Operands.push_back(
3511 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3512 Operands.push_back(
3513 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3514 return ParseStatus::Success;
3515 }
3516
3517 // The optional operand must be "lsl #N" where N is non-negative.
3518 if (!getTok().is(AsmToken::Identifier) ||
3519 !getTok().getIdentifier().equals_insensitive("lsl"))
3520 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3521
3522 // Eat 'lsl'
3523 Lex();
3524
3525 parseOptionalToken(AsmToken::Hash);
3526
3527 if (getTok().isNot(AsmToken::Integer))
3528 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3529
3530 int64_t ShiftAmount = getTok().getIntVal();
3531
3532 if (ShiftAmount < 0)
3533 return Error(getLoc(), "positive shift amount required");
3534 Lex(); // Eat the number
3535
3536 // Just in case the optional lsl #0 is used for immediates other than zero.
3537 if (ShiftAmount == 0 && Imm != nullptr) {
3538 Operands.push_back(
3539 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3540 return ParseStatus::Success;
3541 }
3542
3543 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3544 getLoc(), getContext()));
3545 return ParseStatus::Success;
3546}
3547
3548/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3549/// suggestion to help common typos.
3551AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3552 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3553 .Case("eq", AArch64CC::EQ)
3554 .Case("ne", AArch64CC::NE)
3555 .Case("cs", AArch64CC::HS)
3556 .Case("hs", AArch64CC::HS)
3557 .Case("cc", AArch64CC::LO)
3558 .Case("lo", AArch64CC::LO)
3559 .Case("mi", AArch64CC::MI)
3560 .Case("pl", AArch64CC::PL)
3561 .Case("vs", AArch64CC::VS)
3562 .Case("vc", AArch64CC::VC)
3563 .Case("hi", AArch64CC::HI)
3564 .Case("ls", AArch64CC::LS)
3565 .Case("ge", AArch64CC::GE)
3566 .Case("lt", AArch64CC::LT)
3567 .Case("gt", AArch64CC::GT)
3568 .Case("le", AArch64CC::LE)
3569 .Case("al", AArch64CC::AL)
3570 .Case("nv", AArch64CC::NV)
3571 // SVE condition code aliases:
3572 .Case("none", AArch64CC::EQ)
3573 .Case("any", AArch64CC::NE)
3574 .Case("nlast", AArch64CC::HS)
3575 .Case("last", AArch64CC::LO)
3576 .Case("first", AArch64CC::MI)
3577 .Case("nfrst", AArch64CC::PL)
3578 .Case("pmore", AArch64CC::HI)
3579 .Case("plast", AArch64CC::LS)
3580 .Case("tcont", AArch64CC::GE)
3581 .Case("tstop", AArch64CC::LT)
3582 .Default(AArch64CC::Invalid);
3583
3584 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3585 Suggestion = "nfrst";
3586
3587 return CC;
3588}
3589
3590/// parseCondCode - Parse a Condition Code operand.
3591bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3592 bool invertCondCode) {
3593 SMLoc S = getLoc();
3594 const AsmToken &Tok = getTok();
3595 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3596
3597 StringRef Cond = Tok.getString();
3598 std::string Suggestion;
3599 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3600 if (CC == AArch64CC::Invalid) {
3601 std::string Msg = "invalid condition code";
3602 if (!Suggestion.empty())
3603 Msg += ", did you mean " + Suggestion + "?";
3604 return TokError(Msg);
3605 }
3606 Lex(); // Eat identifier token.
3607
3608 if (invertCondCode) {
3609 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3610 return TokError("condition codes AL and NV are invalid for this instruction");
3612 }
3613
3614 Operands.push_back(
3615 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3616 return false;
3617}
3618
3619ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3620 const AsmToken &Tok = getTok();
3621 SMLoc S = getLoc();
3622
3623 if (Tok.isNot(AsmToken::Identifier))
3624 return TokError("invalid operand for instruction");
3625
3626 unsigned PStateImm = -1;
3627 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3628 if (!SVCR)
3629 return ParseStatus::NoMatch;
3630 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3631 PStateImm = SVCR->Encoding;
3632
3633 Operands.push_back(
3634 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3635 Lex(); // Eat identifier token.
3636 return ParseStatus::Success;
3637}
3638
3639ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3640 const AsmToken &Tok = getTok();
3641 SMLoc S = getLoc();
3642
3643 StringRef Name = Tok.getString();
3644
3645 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3646 Lex(); // eat "za[.(b|h|s|d)]"
3647 unsigned ElementWidth = 0;
3648 auto DotPosition = Name.find('.');
3649 if (DotPosition != StringRef::npos) {
3650 const auto &KindRes =
3651 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3652 if (!KindRes)
3653 return TokError(
3654 "Expected the register to be followed by element width suffix");
3655 ElementWidth = KindRes->second;
3656 }
3657 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3658 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3659 getContext()));
3660 if (getLexer().is(AsmToken::LBrac)) {
3661 // There's no comma after matrix operand, so we can parse the next operand
3662 // immediately.
3663 if (parseOperand(Operands, false, false))
3664 return ParseStatus::NoMatch;
3665 }
3666 return ParseStatus::Success;
3667 }
3668
3669 // Try to parse matrix register.
3670 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3671 if (!Reg)
3672 return ParseStatus::NoMatch;
3673
3674 size_t DotPosition = Name.find('.');
3675 assert(DotPosition != StringRef::npos && "Unexpected register");
3676
3677 StringRef Head = Name.take_front(DotPosition);
3678 StringRef Tail = Name.drop_front(DotPosition);
3679 StringRef RowOrColumn = Head.take_back();
3680
3681 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3682 .Case("h", MatrixKind::Row)
3683 .Case("v", MatrixKind::Col)
3684 .Default(MatrixKind::Tile);
3685
3686 // Next up, parsing the suffix
3687 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3688 if (!KindRes)
3689 return TokError(
3690 "Expected the register to be followed by element width suffix");
3691 unsigned ElementWidth = KindRes->second;
3692
3693 Lex();
3694
3695 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3696 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3697
3698 if (getLexer().is(AsmToken::LBrac)) {
3699 // There's no comma after matrix operand, so we can parse the next operand
3700 // immediately.
3701 if (parseOperand(Operands, false, false))
3702 return ParseStatus::NoMatch;
3703 }
3704 return ParseStatus::Success;
3705}
3706
3707/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3708/// them if present.
3709ParseStatus
3710AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3711 const AsmToken &Tok = getTok();
3712 std::string LowerID = Tok.getString().lower();
3714 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3715 .Case("lsl", AArch64_AM::LSL)
3716 .Case("lsr", AArch64_AM::LSR)
3717 .Case("asr", AArch64_AM::ASR)
3718 .Case("ror", AArch64_AM::ROR)
3719 .Case("msl", AArch64_AM::MSL)
3720 .Case("uxtb", AArch64_AM::UXTB)
3721 .Case("uxth", AArch64_AM::UXTH)
3722 .Case("uxtw", AArch64_AM::UXTW)
3723 .Case("uxtx", AArch64_AM::UXTX)
3724 .Case("sxtb", AArch64_AM::SXTB)
3725 .Case("sxth", AArch64_AM::SXTH)
3726 .Case("sxtw", AArch64_AM::SXTW)
3727 .Case("sxtx", AArch64_AM::SXTX)
3729
3731 return ParseStatus::NoMatch;
3732
3733 SMLoc S = Tok.getLoc();
3734 Lex();
3735
3736 bool Hash = parseOptionalToken(AsmToken::Hash);
3737
3738 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3739 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3740 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3741 ShOp == AArch64_AM::MSL) {
3742 // We expect a number here.
3743 return TokError("expected #imm after shift specifier");
3744 }
3745
3746 // "extend" type operations don't need an immediate, #0 is implicit.
3747 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3748 Operands.push_back(
3749 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3750 return ParseStatus::Success;
3751 }
3752
3753 // Make sure we do actually have a number, identifier or a parenthesized
3754 // expression.
3755 SMLoc E = getLoc();
3756 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3757 !getTok().is(AsmToken::Identifier))
3758 return Error(E, "expected integer shift amount");
3759
3760 const MCExpr *ImmVal;
3761 if (getParser().parseExpression(ImmVal))
3762 return ParseStatus::Failure;
3763
3764 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3765 if (!MCE)
3766 return Error(E, "expected constant '#imm' after shift specifier");
3767
3768 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3769 Operands.push_back(AArch64Operand::CreateShiftExtend(
3770 ShOp, MCE->getValue(), true, S, E, getContext()));
3771 return ParseStatus::Success;
3772}
3773
3774static const struct Extension {
3775 const char *Name;
3777} ExtensionMap[] = {
3778 {"crc", {AArch64::FeatureCRC}},
3779 {"sm4", {AArch64::FeatureSM4}},
3780 {"sha3", {AArch64::FeatureSHA3}},
3781 {"sha2", {AArch64::FeatureSHA2}},
3782 {"aes", {AArch64::FeatureAES}},
3783 {"crypto", {AArch64::FeatureCrypto}},
3784 {"fp", {AArch64::FeatureFPARMv8}},
3785 {"simd", {AArch64::FeatureNEON}},
3786 {"ras", {AArch64::FeatureRAS}},
3787 {"rasv2", {AArch64::FeatureRASv2}},
3788 {"lse", {AArch64::FeatureLSE}},
3789 {"predres", {AArch64::FeaturePredRes}},
3790 {"predres2", {AArch64::FeatureSPECRES2}},
3791 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3792 {"mte", {AArch64::FeatureMTE}},
3793 {"memtag", {AArch64::FeatureMTE}},
3794 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3795 {"pan", {AArch64::FeaturePAN}},
3796 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3797 {"ccpp", {AArch64::FeatureCCPP}},
3798 {"rcpc", {AArch64::FeatureRCPC}},
3799 {"rng", {AArch64::FeatureRandGen}},
3800 {"sve", {AArch64::FeatureSVE}},
3801 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3802 {"sve2", {AArch64::FeatureSVE2}},
3803 {"sve-aes", {AArch64::FeatureSVEAES}},
3804 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3805 {"sve-sm4", {AArch64::FeatureSVESM4}},
3806 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3807 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3808 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3809 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3810 {"sve2-bitperm",
3811 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3812 AArch64::FeatureSVE2}},
3813 {"sve2p1", {AArch64::FeatureSVE2p1}},
3814 {"ls64", {AArch64::FeatureLS64}},
3815 {"xs", {AArch64::FeatureXS}},
3816 {"pauth", {AArch64::FeaturePAuth}},
3817 {"flagm", {AArch64::FeatureFlagM}},
3818 {"rme", {AArch64::FeatureRME}},
3819 {"sme", {AArch64::FeatureSME}},
3820 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3821 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3822 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3823 {"sme2", {AArch64::FeatureSME2}},
3824 {"sme2p1", {AArch64::FeatureSME2p1}},
3825 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3826 {"hbc", {AArch64::FeatureHBC}},
3827 {"mops", {AArch64::FeatureMOPS}},
3828 {"mec", {AArch64::FeatureMEC}},
3829 {"the", {AArch64::FeatureTHE}},
3830 {"d128", {AArch64::FeatureD128}},
3831 {"lse128", {AArch64::FeatureLSE128}},
3832 {"ite", {AArch64::FeatureITE}},
3833 {"cssc", {AArch64::FeatureCSSC}},
3834 {"rcpc3", {AArch64::FeatureRCPC3}},
3835 {"gcs", {AArch64::FeatureGCS}},
3836 {"bf16", {AArch64::FeatureBF16}},
3837 {"compnum", {AArch64::FeatureComplxNum}},
3838 {"dotprod", {AArch64::FeatureDotProd}},
3839 {"f32mm", {AArch64::FeatureMatMulFP32}},
3840 {"f64mm", {AArch64::FeatureMatMulFP64}},
3841 {"fp16", {AArch64::FeatureFullFP16}},
3842 {"fp16fml", {AArch64::FeatureFP16FML}},
3843 {"i8mm", {AArch64::FeatureMatMulInt8}},
3844 {"lor", {AArch64::FeatureLOR}},
3845 {"profile", {AArch64::FeatureSPE}},
3846 // "rdma" is the name documented by binutils for the feature, but
3847 // binutils also accepts incomplete prefixes of features, so "rdm"
3848 // works too. Support both spellings here.
3849 {"rdm", {AArch64::FeatureRDM}},
3850 {"rdma", {AArch64::FeatureRDM}},
3851 {"sb", {AArch64::FeatureSB}},
3852 {"ssbs", {AArch64::FeatureSSBS}},
3853 {"tme", {AArch64::FeatureTME}},
3854 {"fp8", {AArch64::FeatureFP8}},
3855 {"faminmax", {AArch64::FeatureFAMINMAX}},
3856 {"fp8fma", {AArch64::FeatureFP8FMA}},
3857 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3858 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3859 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3860 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3861 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3862 {"lut", {AArch64::FeatureLUT}},
3863 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3864 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3865 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3866 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3867 {"cpa", {AArch64::FeatureCPA}},
3868 {"tlbiw", {AArch64::FeatureTLBIW}},
3869 {"pops", {AArch64::FeaturePoPS}},
3870 {"cmpbr", {AArch64::FeatureCMPBR}},
3871 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3872 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3873 {"fprcvt", {AArch64::FeatureFPRCVT}},
3874 {"lsfe", {AArch64::FeatureLSFE}},
3875 {"sme2p2", {AArch64::FeatureSME2p2}},
3876 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3877 {"sve2p2", {AArch64::FeatureSVE2p2}},
3878 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3879 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3880 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3881 {"lsui", {AArch64::FeatureLSUI}},
3882 {"occmo", {AArch64::FeatureOCCMO}},
3883 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3884 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3885 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3886 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3887 {"cmh", {AArch64::FeatureCMH}},
3888 {"lscp", {AArch64::FeatureLSCP}},
3889 {"tlbid", {AArch64::FeatureTLBID}},
3890 {"mpamv2", {AArch64::FeatureMPAMv2}},
3891 {"mtetc", {AArch64::FeatureMTETC}},
3892 {"gcie", {AArch64::FeatureGCIE}},
3893 {"sme2p3", {AArch64::FeatureSME2p3}},
3894 {"sve2p3", {AArch64::FeatureSVE2p3}},
3895 {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3896 {"f16mm", {AArch64::FeatureF16MM}},
3897 {"f16f32dot", {AArch64::FeatureF16F32DOT}},
3898 {"f16f32mm", {AArch64::FeatureF16F32MM}},
3900
3901static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3902 if (FBS[AArch64::HasV8_0aOps])
3903 Str += "ARMv8a";
3904 if (FBS[AArch64::HasV8_1aOps])
3905 Str += "ARMv8.1a";
3906 else if (FBS[AArch64::HasV8_2aOps])
3907 Str += "ARMv8.2a";
3908 else if (FBS[AArch64::HasV8_3aOps])
3909 Str += "ARMv8.3a";
3910 else if (FBS[AArch64::HasV8_4aOps])
3911 Str += "ARMv8.4a";
3912 else if (FBS[AArch64::HasV8_5aOps])
3913 Str += "ARMv8.5a";
3914 else if (FBS[AArch64::HasV8_6aOps])
3915 Str += "ARMv8.6a";
3916 else if (FBS[AArch64::HasV8_7aOps])
3917 Str += "ARMv8.7a";
3918 else if (FBS[AArch64::HasV8_8aOps])
3919 Str += "ARMv8.8a";
3920 else if (FBS[AArch64::HasV8_9aOps])
3921 Str += "ARMv8.9a";
3922 else if (FBS[AArch64::HasV9_0aOps])
3923 Str += "ARMv9-a";
3924 else if (FBS[AArch64::HasV9_1aOps])
3925 Str += "ARMv9.1a";
3926 else if (FBS[AArch64::HasV9_2aOps])
3927 Str += "ARMv9.2a";
3928 else if (FBS[AArch64::HasV9_3aOps])
3929 Str += "ARMv9.3a";
3930 else if (FBS[AArch64::HasV9_4aOps])
3931 Str += "ARMv9.4a";
3932 else if (FBS[AArch64::HasV9_5aOps])
3933 Str += "ARMv9.5a";
3934 else if (FBS[AArch64::HasV9_6aOps])
3935 Str += "ARMv9.6a";
3936 else if (FBS[AArch64::HasV9_7aOps])
3937 Str += "ARMv9.7a";
3938 else if (FBS[AArch64::HasV8_0rOps])
3939 Str += "ARMv8r";
3940 else {
3941 SmallVector<std::string, 2> ExtMatches;
3942 for (const auto& Ext : ExtensionMap) {
3943 // Use & in case multiple features are enabled
3944 if ((FBS & Ext.Features) != FeatureBitset())
3945 ExtMatches.push_back(Ext.Name);
3946 }
3947 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3948 }
3949}
3950
3951void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3952 SMLoc S) {
3953 const uint16_t Op2 = Encoding & 7;
3954 const uint16_t Cm = (Encoding & 0x78) >> 3;
3955 const uint16_t Cn = (Encoding & 0x780) >> 7;
3956 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3957
3958 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3959
3960 Operands.push_back(
3961 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3962 Operands.push_back(
3963 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3964 Operands.push_back(
3965 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3966 Expr = MCConstantExpr::create(Op2, getContext());
3967 Operands.push_back(
3968 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3969}
3970
3971/// parseSysAlias - The IC, DC, AT, TLBI, MLBI and GIC{R} and GSB instructions
3972/// are simple aliases for the SYS instruction. Parse them specially so that
3973/// we create a SYS MCInst.
3974bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3975 OperandVector &Operands) {
3976 if (Name.contains('.'))
3977 return TokError("invalid operand");
3978
3979 Mnemonic = Name;
3980 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3981
3982 const AsmToken &Tok = getTok();
3983 StringRef Op = Tok.getString();
3984 SMLoc S = Tok.getLoc();
3985 bool ExpectRegister = true;
3986 bool OptionalRegister = false;
3987 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3988
3989 if (Mnemonic == "ic") {
3990 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3991 if (!IC)
3992 return TokError("invalid operand for IC instruction");
3993 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3994 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3996 return TokError(Str);
3997 }
3998 ExpectRegister = IC->NeedsReg;
3999 createSysAlias(IC->Encoding, Operands, S);
4000 } else if (Mnemonic == "dc") {
4001 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
4002 if (!DC)
4003 return TokError("invalid operand for DC instruction");
4004 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
4005 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4007 return TokError(Str);
4008 }
4009 createSysAlias(DC->Encoding, Operands, S);
4010 } else if (Mnemonic == "at") {
4011 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
4012 if (!AT)
4013 return TokError("invalid operand for AT instruction");
4014 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
4015 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4017 return TokError(Str);
4018 }
4019 createSysAlias(AT->Encoding, Operands, S);
4020 } else if (Mnemonic == "tlbi") {
4021 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
4022 if (!TLBI)
4023 return TokError("invalid operand for TLBI instruction");
4024 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
4025 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4027 return TokError(Str);
4028 }
4029 ExpectRegister = TLBI->NeedsReg;
4030 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4031 if (hasAll || hasTLBID) {
4032 OptionalRegister = TLBI->OptionalReg;
4033 }
4034 createSysAlias(TLBI->Encoding, Operands, S);
4035 } else if (Mnemonic == "mlbi") {
4036 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
4037 if (!MLBI)
4038 return TokError("invalid operand for MLBI instruction");
4039 else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
4040 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4042 return TokError(Str);
4043 }
4044 ExpectRegister = MLBI->NeedsReg;
4045 createSysAlias(MLBI->Encoding, Operands, S);
4046 } else if (Mnemonic == "gic") {
4047 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
4048 if (!GIC)
4049 return TokError("invalid operand for GIC instruction");
4050 else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
4051 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4053 return TokError(Str);
4054 }
4055 ExpectRegister = true;
4056 createSysAlias(GIC->Encoding, Operands, S);
4057 } else if (Mnemonic == "gsb") {
4058 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
4059 if (!GSB)
4060 return TokError("invalid operand for GSB instruction");
4061 else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
4062 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4064 return TokError(Str);
4065 }
4066 ExpectRegister = false;
4067 createSysAlias(GSB->Encoding, Operands, S);
4068 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4069 Mnemonic == "cosp") {
4070
4071 if (Op.lower() != "rctx")
4072 return TokError("invalid operand for prediction restriction instruction");
4073
4074 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
4075 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
4076
4077 if (Mnemonic == "cosp" && !hasSpecres2)
4078 return TokError("COSP requires: predres2");
4079 if (!hasPredres)
4080 return TokError(Mnemonic.upper() + "RCTX requires: predres");
4081
4082 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4083 : Mnemonic == "dvp" ? 0b101
4084 : Mnemonic == "cosp" ? 0b110
4085 : Mnemonic == "cpp" ? 0b111
4086 : 0;
4087 assert(PRCTX_Op2 &&
4088 "Invalid mnemonic for prediction restriction instruction");
4089 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4090 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4091
4092 createSysAlias(Encoding, Operands, S);
4093 }
4094
4095 Lex(); // Eat operand.
4096
4097 bool HasRegister = false;
4098
4099 // Check for the optional register operand.
4100 if (parseOptionalToken(AsmToken::Comma)) {
4101 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
4102 return TokError("expected register operand");
4103 HasRegister = true;
4104 }
4105
4106 if (!OptionalRegister) {
4107 if (ExpectRegister && !HasRegister)
4108 return TokError("specified " + Mnemonic + " op requires a register");
4109 else if (!ExpectRegister && HasRegister)
4110 return TokError("specified " + Mnemonic + " op does not use a register");
4111 }
4112
4113 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4114 return true;
4115
4116 return false;
4117}
4118
4119/// parseSyslAlias - The GICR instructions are simple aliases for
4120/// the SYSL instruction. Parse them specially so that we create a
4121/// SYS MCInst.
4122bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4123 OperandVector &Operands) {
4124
4125 Mnemonic = Name;
4126 Operands.push_back(
4127 AArch64Operand::CreateToken("sysl", NameLoc, getContext()));
4128
4129 // Now expect two operands (identifier + register)
4130 SMLoc startLoc = getLoc();
4131 const AsmToken &regTok = getTok();
4132 StringRef reg = regTok.getString();
4133 unsigned RegNum = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
4134 if (!RegNum)
4135 return TokError("expected register operand");
4136
4137 Operands.push_back(AArch64Operand::CreateReg(
4138 RegNum, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
4139
4140 Lex(); // Eat token
4141 if (parseToken(AsmToken::Comma))
4142 return true;
4143
4144 // Check for identifier
4145 const AsmToken &operandTok = getTok();
4146 StringRef Op = operandTok.getString();
4147 SMLoc S2 = operandTok.getLoc();
4148 Lex(); // Eat token
4149
4150 if (Mnemonic == "gicr") {
4151 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
4152 if (!GICR)
4153 return Error(S2, "invalid operand for GICR instruction");
4154 else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
4155 std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
4157 return Error(S2, Str);
4158 }
4159 createSysAlias(GICR->Encoding, Operands, S2);
4160 }
4161
4162 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4163 return true;
4164
4165 return false;
4166}
4167
4168/// parseSyspAlias - The TLBIP instructions are simple aliases for
4169/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4170bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4171 OperandVector &Operands) {
4172 if (Name.contains('.'))
4173 return TokError("invalid operand");
4174
4175 Mnemonic = Name;
4176 Operands.push_back(
4177 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4178
4179 const AsmToken &Tok = getTok();
4180 StringRef Op = Tok.getString();
4181 SMLoc S = Tok.getLoc();
4182
4183 if (Mnemonic == "tlbip") {
4184 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4185 if (HasnXSQualifier) {
4186 Op = Op.drop_back(3);
4187 }
4188 const AArch64TLBIP::TLBIP *TLBIPorig = AArch64TLBIP::lookupTLBIPByName(Op);
4189 if (!TLBIPorig)
4190 return TokError("invalid operand for TLBIP instruction");
4191 const AArch64TLBIP::TLBIP TLBIP(
4192 TLBIPorig->Name, TLBIPorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4193 TLBIPorig->NeedsReg, TLBIPorig->OptionalReg,
4194 HasnXSQualifier
4195 ? TLBIPorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4196 : TLBIPorig->FeaturesRequired);
4197 if (!TLBIP.haveFeatures(getSTI().getFeatureBits())) {
4198 std::string Name =
4199 std::string(TLBIP.Name) + (HasnXSQualifier ? "nXS" : "");
4200 std::string Str("TLBIP " + Name + " requires: ");
4201 setRequiredFeatureString(TLBIP.getRequiredFeatures(), Str);
4202 return TokError(Str);
4203 }
4204 createSysAlias(TLBIP.Encoding, Operands, S);
4205 }
4206
4207 Lex(); // Eat operand.
4208
4209 if (parseComma())
4210 return true;
4211
4212 if (Tok.isNot(AsmToken::Identifier))
4213 return TokError("expected register identifier");
4214 auto Result = tryParseSyspXzrPair(Operands);
4215 if (Result.isNoMatch())
4216 Result = tryParseGPRSeqPair(Operands);
4217 if (!Result.isSuccess())
4218 return TokError("specified " + Mnemonic +
4219 " op requires a pair of registers");
4220
4221 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4222 return true;
4223
4224 return false;
4225}
4226
4227ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4228 MCAsmParser &Parser = getParser();
4229 const AsmToken &Tok = getTok();
4230
4231 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4232 return TokError("'csync' operand expected");
4233 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4234 // Immediate operand.
4235 const MCExpr *ImmVal;
4236 SMLoc ExprLoc = getLoc();
4237 AsmToken IntTok = Tok;
4238 if (getParser().parseExpression(ImmVal))
4239 return ParseStatus::Failure;
4240 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4241 if (!MCE)
4242 return Error(ExprLoc, "immediate value expected for barrier operand");
4243 int64_t Value = MCE->getValue();
4244 if (Mnemonic == "dsb" && Value > 15) {
4245 // This case is a no match here, but it might be matched by the nXS
4246 // variant. Deliberately not unlex the optional '#' as it is not necessary
4247 // to characterize an integer immediate.
4248 Parser.getLexer().UnLex(IntTok);
4249 return ParseStatus::NoMatch;
4250 }
4251 if (Value < 0 || Value > 15)
4252 return Error(ExprLoc, "barrier operand out of range");
4253 auto DB = AArch64DB::lookupDBByEncoding(Value);
4254 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4255 ExprLoc, getContext(),
4256 false /*hasnXSModifier*/));
4257 return ParseStatus::Success;
4258 }
4259
4260 if (Tok.isNot(AsmToken::Identifier))
4261 return TokError("invalid operand for instruction");
4262
4263 StringRef Operand = Tok.getString();
4264 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4265 auto DB = AArch64DB::lookupDBByName(Operand);
4266 // The only valid named option for ISB is 'sy'
4267 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4268 return TokError("'sy' or #imm operand expected");
4269 // The only valid named option for TSB is 'csync'
4270 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4271 return TokError("'csync' operand expected");
4272 if (!DB && !TSB) {
4273 if (Mnemonic == "dsb") {
4274 // This case is a no match here, but it might be matched by the nXS
4275 // variant.
4276 return ParseStatus::NoMatch;
4277 }
4278 return TokError("invalid barrier option name");
4279 }
4280
4281 Operands.push_back(AArch64Operand::CreateBarrier(
4282 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4283 getContext(), false /*hasnXSModifier*/));
4284 Lex(); // Consume the option
4285
4286 return ParseStatus::Success;
4287}
4288
4289ParseStatus
4290AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4291 const AsmToken &Tok = getTok();
4292
4293 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4294 if (Mnemonic != "dsb")
4295 return ParseStatus::Failure;
4296
4297 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4298 // Immediate operand.
4299 const MCExpr *ImmVal;
4300 SMLoc ExprLoc = getLoc();
4301 if (getParser().parseExpression(ImmVal))
4302 return ParseStatus::Failure;
4303 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4304 if (!MCE)
4305 return Error(ExprLoc, "immediate value expected for barrier operand");
4306 int64_t Value = MCE->getValue();
4307 // v8.7-A DSB in the nXS variant accepts only the following immediate
4308 // values: 16, 20, 24, 28.
4309 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4310 return Error(ExprLoc, "barrier operand out of range");
4311 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4312 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4313 ExprLoc, getContext(),
4314 true /*hasnXSModifier*/));
4315 return ParseStatus::Success;
4316 }
4317
4318 if (Tok.isNot(AsmToken::Identifier))
4319 return TokError("invalid operand for instruction");
4320
4321 StringRef Operand = Tok.getString();
4322 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4323
4324 if (!DB)
4325 return TokError("invalid barrier option name");
4326
4327 Operands.push_back(
4328 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4329 getContext(), true /*hasnXSModifier*/));
4330 Lex(); // Consume the option
4331
4332 return ParseStatus::Success;
4333}
4334
4335ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4336 const AsmToken &Tok = getTok();
4337
4338 if (Tok.isNot(AsmToken::Identifier))
4339 return ParseStatus::NoMatch;
4340
4341 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4342 return ParseStatus::NoMatch;
4343
4344 int MRSReg, MSRReg;
4345 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4346 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4347 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4348 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4349 } else
4350 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4351
4352 unsigned PStateImm = -1;
4353 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4354 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4355 PStateImm = PState15->Encoding;
4356 if (!PState15) {
4357 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4358 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4359 PStateImm = PState1->Encoding;
4360 }
4361
4362 Operands.push_back(
4363 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4364 PStateImm, getContext()));
4365 Lex(); // Eat identifier
4366
4367 return ParseStatus::Success;
4368}
4369
4370ParseStatus
4371AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4372 SMLoc S = getLoc();
4373 const AsmToken &Tok = getTok();
4374 if (Tok.isNot(AsmToken::Identifier))
4375 return TokError("invalid operand for instruction");
4376
4378 if (!PH)
4379 return TokError("invalid operand for instruction");
4380
4381 Operands.push_back(AArch64Operand::CreatePHintInst(
4382 PH->Encoding, Tok.getString(), S, getContext()));
4383 Lex(); // Eat identifier token.
4384 return ParseStatus::Success;
4385}
4386
4387/// tryParseNeonVectorRegister - Parse a vector register operand.
4388bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4389 if (getTok().isNot(AsmToken::Identifier))
4390 return true;
4391
4392 SMLoc S = getLoc();
4393 // Check for a vector register specifier first.
4394 StringRef Kind;
4395 MCRegister Reg;
4396 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4397 if (!Res.isSuccess())
4398 return true;
4399
4400 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4401 if (!KindRes)
4402 return true;
4403
4404 unsigned ElementWidth = KindRes->second;
4405 Operands.push_back(
4406 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4407 S, getLoc(), getContext()));
4408
4409 // If there was an explicit qualifier, that goes on as a literal text
4410 // operand.
4411 if (!Kind.empty())
4412 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4413
4414 return tryParseVectorIndex(Operands).isFailure();
4415}
4416
4417ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4418 SMLoc SIdx = getLoc();
4419 if (parseOptionalToken(AsmToken::LBrac)) {
4420 const MCExpr *ImmVal;
4421 if (getParser().parseExpression(ImmVal))
4422 return ParseStatus::NoMatch;
4423 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4424 if (!MCE)
4425 return TokError("immediate value expected for vector index");
4426
4427 SMLoc E = getLoc();
4428
4429 if (parseToken(AsmToken::RBrac, "']' expected"))
4430 return ParseStatus::Failure;
4431
4432 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4433 E, getContext()));
4434 return ParseStatus::Success;
4435 }
4436
4437 return ParseStatus::NoMatch;
4438}
4439
4440// tryParseVectorRegister - Try to parse a vector register name with
4441// optional kind specifier. If it is a register specifier, eat the token
4442// and return it.
4443ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4444 StringRef &Kind,
4445 RegKind MatchKind) {
4446 const AsmToken &Tok = getTok();
4447
4448 if (Tok.isNot(AsmToken::Identifier))
4449 return ParseStatus::NoMatch;
4450
4451 StringRef Name = Tok.getString();
4452 // If there is a kind specifier, it's separated from the register name by
4453 // a '.'.
4454 size_t Start = 0, Next = Name.find('.');
4455 StringRef Head = Name.slice(Start, Next);
4456 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4457
4458 if (RegNum) {
4459 if (Next != StringRef::npos) {
4460 Kind = Name.substr(Next);
4461 if (!isValidVectorKind(Kind, MatchKind))
4462 return TokError("invalid vector kind qualifier");
4463 }
4464 Lex(); // Eat the register token.
4465
4466 Reg = RegNum;
4467 return ParseStatus::Success;
4468 }
4469
4470 return ParseStatus::NoMatch;
4471}
4472
4473ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4474 OperandVector &Operands) {
4475 ParseStatus Status =
4476 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4477 if (!Status.isSuccess())
4478 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4479 return Status;
4480}
4481
4482/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4483template <RegKind RK>
4484ParseStatus
4485AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4486 // Check for a SVE predicate register specifier first.
4487 const SMLoc S = getLoc();
4488 StringRef Kind;
4489 MCRegister RegNum;
4490 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4491 if (!Res.isSuccess())
4492 return Res;
4493
4494 const auto &KindRes = parseVectorKind(Kind, RK);
4495 if (!KindRes)
4496 return ParseStatus::NoMatch;
4497
4498 unsigned ElementWidth = KindRes->second;
4499 Operands.push_back(AArch64Operand::CreateVectorReg(
4500 RegNum, RK, ElementWidth, S,
4501 getLoc(), getContext()));
4502
4503 if (getLexer().is(AsmToken::LBrac)) {
4504 if (RK == RegKind::SVEPredicateAsCounter) {
4505 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4506 if (ResIndex.isSuccess())
4507 return ParseStatus::Success;
4508 } else {
4509 // Indexed predicate, there's no comma so try parse the next operand
4510 // immediately.
4511 if (parseOperand(Operands, false, false))
4512 return ParseStatus::NoMatch;
4513 }
4514 }
4515
4516 // Not all predicates are followed by a '/m' or '/z'.
4517 if (getTok().isNot(AsmToken::Slash))
4518 return ParseStatus::Success;
4519
4520 // But when they do they shouldn't have an element type suffix.
4521 if (!Kind.empty())
4522 return Error(S, "not expecting size suffix");
4523
4524 // Add a literal slash as operand
4525 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4526
4527 Lex(); // Eat the slash.
4528
4529 // Zeroing or merging?
4530 auto Pred = getTok().getString().lower();
4531 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4532 return Error(getLoc(), "expecting 'z' predication");
4533
4534 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4535 return Error(getLoc(), "expecting 'm' or 'z' predication");
4536
4537 // Add zero/merge token.
4538 const char *ZM = Pred == "z" ? "z" : "m";
4539 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4540
4541 Lex(); // Eat zero/merge token.
4542 return ParseStatus::Success;
4543}
4544
4545/// parseRegister - Parse a register operand.
4546bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4547 // Try for a Neon vector register.
4548 if (!tryParseNeonVectorRegister(Operands))
4549 return false;
4550
4551 if (tryParseZTOperand(Operands).isSuccess())
4552 return false;
4553
4554 // Otherwise try for a scalar register.
4555 if (tryParseGPROperand<false>(Operands).isSuccess())
4556 return false;
4557
4558 return true;
4559}
4560
4561bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4562 bool HasELFModifier = false;
4563 AArch64::Specifier RefKind;
4564 SMLoc Loc = getLexer().getLoc();
4565 if (parseOptionalToken(AsmToken::Colon)) {
4566 HasELFModifier = true;
4567
4568 if (getTok().isNot(AsmToken::Identifier))
4569 return TokError("expect relocation specifier in operand after ':'");
4570
4571 std::string LowerCase = getTok().getIdentifier().lower();
4572 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4573 .Case("lo12", AArch64::S_LO12)
4574 .Case("abs_g3", AArch64::S_ABS_G3)
4575 .Case("abs_g2", AArch64::S_ABS_G2)
4576 .Case("abs_g2_s", AArch64::S_ABS_G2_S)
4577 .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
4578 .Case("abs_g1", AArch64::S_ABS_G1)
4579 .Case("abs_g1_s", AArch64::S_ABS_G1_S)
4580 .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
4581 .Case("abs_g0", AArch64::S_ABS_G0)
4582 .Case("abs_g0_s", AArch64::S_ABS_G0_S)
4583 .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
4584 .Case("prel_g3", AArch64::S_PREL_G3)
4585 .Case("prel_g2", AArch64::S_PREL_G2)
4586 .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
4587 .Case("prel_g1", AArch64::S_PREL_G1)
4588 .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
4589 .Case("prel_g0", AArch64::S_PREL_G0)
4590 .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
4591 .Case("dtprel_g2", AArch64::S_DTPREL_G2)
4592 .Case("dtprel_g1", AArch64::S_DTPREL_G1)
4593 .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
4594 .Case("dtprel_g0", AArch64::S_DTPREL_G0)
4595 .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
4596 .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
4597 .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
4598 .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
4599 .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
4600 .Case("tprel_g2", AArch64::S_TPREL_G2)
4601 .Case("tprel_g1", AArch64::S_TPREL_G1)
4602 .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
4603 .Case("tprel_g0", AArch64::S_TPREL_G0)
4604 .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
4605 .Case("tprel_hi12", AArch64::S_TPREL_HI12)
4606 .Case("tprel_lo12", AArch64::S_TPREL_LO12)
4607 .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
4608 .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
4609 .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
4610 .Case("got", AArch64::S_GOT_PAGE)
4611 .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
4612 .Case("got_lo12", AArch64::S_GOT_LO12)
4613 .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
4614 .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
4615 .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
4616 .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
4617 .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
4618 .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
4619 .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
4620 .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
4621 .Case("secrel_lo12", AArch64::S_SECREL_LO12)
4622 .Case("secrel_hi12", AArch64::S_SECREL_HI12)
4623 .Default(AArch64::S_INVALID);
4624
4625 if (RefKind == AArch64::S_INVALID)
4626 return TokError("expect relocation specifier in operand after ':'");
4627
4628 Lex(); // Eat identifier
4629
4630 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4631 return true;
4632 }
4633
4634 if (getParser().parseExpression(ImmVal))
4635 return true;
4636
4637 if (HasELFModifier)
4638 ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);
4639
4640 SMLoc EndLoc;
4641 if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4642 if (getParser().parseAtSpecifier(ImmVal, EndLoc))
4643 return true;
4644 const MCExpr *Term;
4645 MCBinaryExpr::Opcode Opcode;
4646 if (parseOptionalToken(AsmToken::Plus))
4647 Opcode = MCBinaryExpr::Add;
4648 else if (parseOptionalToken(AsmToken::Minus))
4649 Opcode = MCBinaryExpr::Sub;
4650 else
4651 return false;
4652 if (getParser().parsePrimaryExpr(Term, EndLoc))
4653 return true;
4654 ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
4655 }
4656
4657 return false;
4658}
4659
4660ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4661 if (getTok().isNot(AsmToken::LCurly))
4662 return ParseStatus::NoMatch;
4663
4664 auto ParseMatrixTile = [this](unsigned &Reg,
4665 unsigned &ElementWidth) -> ParseStatus {
4666 StringRef Name = getTok().getString();
4667 size_t DotPosition = Name.find('.');
4668 if (DotPosition == StringRef::npos)
4669 return ParseStatus::NoMatch;
4670
4671 unsigned RegNum = matchMatrixTileListRegName(Name);
4672 if (!RegNum)
4673 return ParseStatus::NoMatch;
4674
4675 StringRef Tail = Name.drop_front(DotPosition);
4676 const std::optional<std::pair<int, int>> &KindRes =
4677 parseVectorKind(Tail, RegKind::Matrix);
4678 if (!KindRes)
4679 return TokError(
4680 "Expected the register to be followed by element width suffix");
4681 ElementWidth = KindRes->second;
4682 Reg = RegNum;
4683 Lex(); // Eat the register.
4684 return ParseStatus::Success;
4685 };
4686
4687 SMLoc S = getLoc();
4688 auto LCurly = getTok();
4689 Lex(); // Eat left bracket token.
4690
4691 // Empty matrix list
4692 if (parseOptionalToken(AsmToken::RCurly)) {
4693 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4694 /*RegMask=*/0, S, getLoc(), getContext()));
4695 return ParseStatus::Success;
4696 }
4697
4698 // Try parse {za} alias early
4699 if (getTok().getString().equals_insensitive("za")) {
4700 Lex(); // Eat 'za'
4701
4702 if (parseToken(AsmToken::RCurly, "'}' expected"))
4703 return ParseStatus::Failure;
4704
4705 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4706 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4707 return ParseStatus::Success;
4708 }
4709
4710 SMLoc TileLoc = getLoc();
4711
4712 unsigned FirstReg, ElementWidth;
4713 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4714 if (!ParseRes.isSuccess()) {
4715 getLexer().UnLex(LCurly);
4716 return ParseRes;
4717 }
4718
4719 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4720
4721 unsigned PrevReg = FirstReg;
4722
4723 SmallSet<unsigned, 8> DRegs;
4724 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4725
4726 SmallSet<unsigned, 8> SeenRegs;
4727 SeenRegs.insert(FirstReg);
4728
4729 while (parseOptionalToken(AsmToken::Comma)) {
4730 TileLoc = getLoc();
4731 unsigned Reg, NextElementWidth;
4732 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4733 if (!ParseRes.isSuccess())
4734 return ParseRes;
4735
4736 // Element size must match on all regs in the list.
4737 if (ElementWidth != NextElementWidth)
4738 return Error(TileLoc, "mismatched register size suffix");
4739
4740 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4741 Warning(TileLoc, "tile list not in ascending order");
4742
4743 if (SeenRegs.contains(Reg))
4744 Warning(TileLoc, "duplicate tile in list");
4745 else {
4746 SeenRegs.insert(Reg);
4747 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4748 }
4749
4750 PrevReg = Reg;
4751 }
4752
4753 if (parseToken(AsmToken::RCurly, "'}' expected"))
4754 return ParseStatus::Failure;
4755
4756 unsigned RegMask = 0;
4757 for (auto Reg : DRegs)
4758 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4759 RI->getEncodingValue(AArch64::ZAD0));
4760 Operands.push_back(
4761 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4762
4763 return ParseStatus::Success;
4764}
4765
4766template <RegKind VectorKind>
4767ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4768 bool ExpectMatch) {
4769 MCAsmParser &Parser = getParser();
4770 if (!getTok().is(AsmToken::LCurly))
4771 return ParseStatus::NoMatch;
4772
4773 // Wrapper around parse function
4774 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4775 bool NoMatchIsError) -> ParseStatus {
4776 auto RegTok = getTok();
4777 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4778 if (ParseRes.isSuccess()) {
4779 if (parseVectorKind(Kind, VectorKind))
4780 return ParseRes;
4781 llvm_unreachable("Expected a valid vector kind");
4782 }
4783
4784 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4785 RegTok.getString().equals_insensitive("zt0"))
4786 return ParseStatus::NoMatch;
4787
4788 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4789 (ParseRes.isNoMatch() && NoMatchIsError &&
4790 !RegTok.getString().starts_with_insensitive("za")))
4791 return Error(Loc, "vector register expected");
4792
4793 return ParseStatus::NoMatch;
4794 };
4795
4796 unsigned NumRegs = getNumRegsForRegKind(VectorKind);
4797 SMLoc S = getLoc();
4798 auto LCurly = getTok();
4799 Lex(); // Eat left bracket token.
4800
4801 StringRef Kind;
4802 MCRegister FirstReg;
4803 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4804
4805 // Put back the original left bracket if there was no match, so that
4806 // different types of list-operands can be matched (e.g. SVE, Neon).
4807 if (ParseRes.isNoMatch())
4808 Parser.getLexer().UnLex(LCurly);
4809
4810 if (!ParseRes.isSuccess())
4811 return ParseRes;
4812
4813 MCRegister PrevReg = FirstReg;
4814 unsigned Count = 1;
4815
4816 unsigned Stride = 1;
4817 if (parseOptionalToken(AsmToken::Minus)) {
4818 SMLoc Loc = getLoc();
4819 StringRef NextKind;
4820
4821 MCRegister Reg;
4822 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4823 if (!ParseRes.isSuccess())
4824 return ParseRes;
4825
4826 // Any Kind suffices must match on all regs in the list.
4827 if (Kind != NextKind)
4828 return Error(Loc, "mismatched register size suffix");
4829
4830 unsigned Space =
4831 (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
4832
4833 if (Space == 0 || Space > 3)
4834 return Error(Loc, "invalid number of vectors");
4835
4836 Count += Space;
4837 }
4838 else {
4839 bool HasCalculatedStride = false;
4840 while (parseOptionalToken(AsmToken::Comma)) {
4841 SMLoc Loc = getLoc();
4842 StringRef NextKind;
4843 MCRegister Reg;
4844 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4845 if (!ParseRes.isSuccess())
4846 return ParseRes;
4847
4848 // Any Kind suffices must match on all regs in the list.
4849 if (Kind != NextKind)
4850 return Error(Loc, "mismatched register size suffix");
4851
4852 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4853 unsigned PrevRegVal =
4854 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4855 if (!HasCalculatedStride) {
4856 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4857 : (NumRegs - (PrevRegVal - RegVal));
4858 HasCalculatedStride = true;
4859 }
4860
4861 // Register must be incremental (with a wraparound at last register).
4862 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4863 return Error(Loc, "registers must have the same sequential stride");
4864
4865 PrevReg = Reg;
4866 ++Count;
4867 }
4868 }
4869
4870 if (parseToken(AsmToken::RCurly, "'}' expected"))
4871 return ParseStatus::Failure;
4872
4873 if (Count > 4)
4874 return Error(S, "invalid number of vectors");
4875
4876 unsigned NumElements = 0;
4877 unsigned ElementWidth = 0;
4878 if (!Kind.empty()) {
4879 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4880 std::tie(NumElements, ElementWidth) = *VK;
4881 }
4882
4883 Operands.push_back(AArch64Operand::CreateVectorList(
4884 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4885 getLoc(), getContext()));
4886
4887 if (getTok().is(AsmToken::LBrac)) {
4888 ParseStatus Res = tryParseVectorIndex(Operands);
4889 if (Res.isFailure())
4890 return ParseStatus::Failure;
4891 return ParseStatus::Success;
4892 }
4893
4894 return ParseStatus::Success;
4895}
4896
4897/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4898bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4899 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4900 if (!ParseRes.isSuccess())
4901 return true;
4902
4903 return tryParseVectorIndex(Operands).isFailure();
4904}
4905
4906ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4907 SMLoc StartLoc = getLoc();
4908
4909 MCRegister RegNum;
4910 ParseStatus Res = tryParseScalarRegister(RegNum);
4911 if (!Res.isSuccess())
4912 return Res;
4913
4914 if (!parseOptionalToken(AsmToken::Comma)) {
4915 Operands.push_back(AArch64Operand::CreateReg(
4916 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4917 return ParseStatus::Success;
4918 }
4919
4920 parseOptionalToken(AsmToken::Hash);
4921
4922 if (getTok().isNot(AsmToken::Integer))
4923 return Error(getLoc(), "index must be absent or #0");
4924
4925 const MCExpr *ImmVal;
4926 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4927 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4928 return Error(getLoc(), "index must be absent or #0");
4929
4930 Operands.push_back(AArch64Operand::CreateReg(
4931 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4932 return ParseStatus::Success;
4933}
4934
4935ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4936 SMLoc StartLoc = getLoc();
4937 const AsmToken &Tok = getTok();
4938 std::string Name = Tok.getString().lower();
4939
4940 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4941
4942 if (RegNum == 0)
4943 return ParseStatus::NoMatch;
4944
4945 Operands.push_back(AArch64Operand::CreateReg(
4946 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4947 Lex(); // Eat register.
4948
4949 // Check if register is followed by an index
4950 if (parseOptionalToken(AsmToken::LBrac)) {
4951 Operands.push_back(
4952 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4953 const MCExpr *ImmVal;
4954 if (getParser().parseExpression(ImmVal))
4955 return ParseStatus::NoMatch;
4956 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4957 if (!MCE)
4958 return TokError("immediate value expected for vector index");
4959 Operands.push_back(AArch64Operand::CreateImm(
4960 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4961 getLoc(), getContext()));
4962 if (parseOptionalToken(AsmToken::Comma))
4963 if (parseOptionalMulOperand(Operands))
4964 return ParseStatus::Failure;
4965 if (parseToken(AsmToken::RBrac, "']' expected"))
4966 return ParseStatus::Failure;
4967 Operands.push_back(
4968 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4969 }
4970 return ParseStatus::Success;
4971}
4972
4973template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4974ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4975 SMLoc StartLoc = getLoc();
4976
4977 MCRegister RegNum;
4978 ParseStatus Res = tryParseScalarRegister(RegNum);
4979 if (!Res.isSuccess())
4980 return Res;
4981
4982 // No shift/extend is the default.
4983 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4984 Operands.push_back(AArch64Operand::CreateReg(
4985 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4986 return ParseStatus::Success;
4987 }
4988
4989 // Eat the comma
4990 Lex();
4991
4992 // Match the shift
4994 Res = tryParseOptionalShiftExtend(ExtOpnd);
4995 if (!Res.isSuccess())
4996 return Res;
4997
4998 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4999 Operands.push_back(AArch64Operand::CreateReg(
5000 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5001 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5002 Ext->hasShiftExtendAmount()));
5003
5004 return ParseStatus::Success;
5005}
5006
5007bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5008 MCAsmParser &Parser = getParser();
5009
5010 // Some SVE instructions have a decoration after the immediate, i.e.
5011 // "mul vl". We parse them here and add tokens, which must be present in the
5012 // asm string in the tablegen instruction.
5013 bool NextIsVL =
5014 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
5015 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
5016 if (!getTok().getString().equals_insensitive("mul") ||
5017 !(NextIsVL || NextIsHash))
5018 return true;
5019
5020 Operands.push_back(
5021 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
5022 Lex(); // Eat the "mul"
5023
5024 if (NextIsVL) {
5025 Operands.push_back(
5026 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
5027 Lex(); // Eat the "vl"
5028 return false;
5029 }
5030
5031 if (NextIsHash) {
5032 Lex(); // Eat the #
5033 SMLoc S = getLoc();
5034
5035 // Parse immediate operand.
5036 const MCExpr *ImmVal;
5037 if (!Parser.parseExpression(ImmVal))
5038 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
5039 Operands.push_back(AArch64Operand::CreateImm(
5040 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
5041 getContext()));
5042 return false;
5043 }
5044 }
5045
5046 return Error(getLoc(), "expected 'vl' or '#<imm>'");
5047}
5048
5049bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5050 StringRef &VecGroup) {
5051 MCAsmParser &Parser = getParser();
5052 auto Tok = Parser.getTok();
5053 if (Tok.isNot(AsmToken::Identifier))
5054 return true;
5055
5056 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5057 .Case("vgx2", "vgx2")
5058 .Case("vgx4", "vgx4")
5059 .Default("");
5060
5061 if (VG.empty())
5062 return true;
5063
5064 VecGroup = VG;
5065 Parser.Lex(); // Eat vgx[2|4]
5066 return false;
5067}
5068
5069bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5070 auto Tok = getTok();
5071 if (Tok.isNot(AsmToken::Identifier))
5072 return true;
5073
5074 auto Keyword = Tok.getString();
5075 Keyword = StringSwitch<StringRef>(Keyword.lower())
5076 .Case("sm", "sm")
5077 .Case("za", "za")
5078 .Default(Keyword);
5079 Operands.push_back(
5080 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5081
5082 Lex();
5083 return false;
5084}
5085
5086/// parseOperand - Parse a arm instruction operand. For now this parses the
5087/// operand regardless of the mnemonic.
5088bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
5089 bool invertCondCode) {
5090 MCAsmParser &Parser = getParser();
5091
5092 ParseStatus ResTy =
5093 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
5094
5095 // Check if the current operand has a custom associated parser, if so, try to
5096 // custom parse the operand, or fallback to the general approach.
5097 if (ResTy.isSuccess())
5098 return false;
5099 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5100 // there was a match, but an error occurred, in which case, just return that
5101 // the operand parsing failed.
5102 if (ResTy.isFailure())
5103 return true;
5104
5105 // Nothing custom, so do general case parsing.
5106 SMLoc S, E;
5107 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
5108 if (parseOptionalToken(AsmToken::Comma)) {
5109 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
5110 if (!Res.isNoMatch())
5111 return Res.isFailure();
5112 getLexer().UnLex(SavedTok);
5113 }
5114 return false;
5115 };
5116 switch (getLexer().getKind()) {
5117 default: {
5118 SMLoc S = getLoc();
5119 const MCExpr *Expr;
5120 if (parseSymbolicImmVal(Expr))
5121 return Error(S, "invalid operand");
5122
5123 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5124 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
5125 return parseOptionalShiftExtend(getTok());
5126 }
5127 case AsmToken::LBrac: {
5128 Operands.push_back(
5129 AArch64Operand::CreateToken("[", getLoc(), getContext()));
5130 Lex(); // Eat '['
5131
5132 // There's no comma after a '[', so we can parse the next operand
5133 // immediately.
5134 return parseOperand(Operands, false, false);
5135 }
5136 case AsmToken::LCurly: {
5137 if (!parseNeonVectorList(Operands))
5138 return false;
5139
5140 Operands.push_back(
5141 AArch64Operand::CreateToken("{", getLoc(), getContext()));
5142 Lex(); // Eat '{'
5143
5144 // There's no comma after a '{', so we can parse the next operand
5145 // immediately.
5146 return parseOperand(Operands, false, false);
5147 }
5148 case AsmToken::Identifier: {
5149 // See if this is a "VG" decoration used by SME instructions.
5150 StringRef VecGroup;
5151 if (!parseOptionalVGOperand(Operands, VecGroup)) {
5152 Operands.push_back(
5153 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
5154 return false;
5155 }
5156 // If we're expecting a Condition Code operand, then just parse that.
5157 if (isCondCode)
5158 return parseCondCode(Operands, invertCondCode);
5159
5160 // If it's a register name, parse it.
5161 if (!parseRegister(Operands)) {
5162 // Parse an optional shift/extend modifier.
5163 AsmToken SavedTok = getTok();
5164 if (parseOptionalToken(AsmToken::Comma)) {
5165 // The operand after the register may be a label (e.g. ADR/ADRP). Check
5166 // such cases and don't report an error when <label> happens to match a
5167 // shift/extend modifier.
5168 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
5169 /*ParseForAllFeatures=*/true);
5170 if (!Res.isNoMatch())
5171 return Res.isFailure();
5172 Res = tryParseOptionalShiftExtend(Operands);
5173 if (!Res.isNoMatch())
5174 return Res.isFailure();
5175 getLexer().UnLex(SavedTok);
5176 }
5177 return false;
5178 }
5179
5180 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5181 // by SVE instructions.
5182 if (!parseOptionalMulOperand(Operands))
5183 return false;
5184
5185 // If this is a two-word mnemonic, parse its special keyword
5186 // operand as an identifier.
5187 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5188 Mnemonic == "gcsb")
5189 return parseKeywordOperand(Operands);
5190
5191 // This was not a register so parse other operands that start with an
5192 // identifier (like labels) as expressions and create them as immediates.
5193 const MCExpr *IdVal, *Term;
5194 S = getLoc();
5195 if (getParser().parseExpression(IdVal))
5196 return true;
5197 if (getParser().parseAtSpecifier(IdVal, E))
5198 return true;
5199 std::optional<MCBinaryExpr::Opcode> Opcode;
5200 if (parseOptionalToken(AsmToken::Plus))
5201 Opcode = MCBinaryExpr::Add;
5202 else if (parseOptionalToken(AsmToken::Minus))
5203 Opcode = MCBinaryExpr::Sub;
5204 if (Opcode) {
5205 if (getParser().parsePrimaryExpr(Term, E))
5206 return true;
5207 IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
5208 }
5209 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5210
5211 // Parse an optional shift/extend modifier.
5212 return parseOptionalShiftExtend(getTok());
5213 }
5214 case AsmToken::Integer:
5215 case AsmToken::Real:
5216 case AsmToken::Hash: {
5217 // #42 -> immediate.
5218 S = getLoc();
5219
5220 parseOptionalToken(AsmToken::Hash);
5221
5222 // Parse a negative sign
5223 bool isNegative = false;
5224 if (getTok().is(AsmToken::Minus)) {
5225 isNegative = true;
5226 // We need to consume this token only when we have a Real, otherwise
5227 // we let parseSymbolicImmVal take care of it
5228 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5229 Lex();
5230 }
5231
5232 // The only Real that should come through here is a literal #0.0 for
5233 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5234 // so convert the value.
5235 const AsmToken &Tok = getTok();
5236 if (Tok.is(AsmToken::Real)) {
5237 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5238 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5239 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5240 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5241 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5242 return TokError("unexpected floating point literal");
5243 else if (IntVal != 0 || isNegative)
5244 return TokError("expected floating-point constant #0.0");
5245 Lex(); // Eat the token.
5246
5247 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5248 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5249 return false;
5250 }
5251
5252 const MCExpr *ImmVal;
5253 if (parseSymbolicImmVal(ImmVal))
5254 return true;
5255
5256 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5257 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5258
5259 // Parse an optional shift/extend modifier.
5260 return parseOptionalShiftExtend(Tok);
5261 }
5262 case AsmToken::Equal: {
5263 SMLoc Loc = getLoc();
5264 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5265 return TokError("unexpected token in operand");
5266 Lex(); // Eat '='
5267 const MCExpr *SubExprVal;
5268 if (getParser().parseExpression(SubExprVal))
5269 return true;
5270
5271 if (Operands.size() < 2 ||
5272 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5273 return Error(Loc, "Only valid when first operand is register");
5274
5275 bool IsXReg =
5276 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5277 Operands[1]->getReg());
5278
5279 MCContext& Ctx = getContext();
5280 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5281 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
5282 if (isa<MCConstantExpr>(SubExprVal)) {
5283 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5284 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5285 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5286 ShiftAmt += 16;
5287 Imm >>= 16;
5288 }
5289 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5290 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5291 Operands.push_back(AArch64Operand::CreateImm(
5292 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5293 if (ShiftAmt)
5294 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5295 ShiftAmt, true, S, E, Ctx));
5296 return false;
5297 }
5298 APInt Simm = APInt(64, Imm << ShiftAmt);
5299 // check if the immediate is an unsigned or signed 32-bit int for W regs
5300 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5301 return Error(Loc, "Immediate too large for register");
5302 }
5303 // If it is a label or an imm that cannot fit in a movz, put it into CP.
5304 const MCExpr *CPLoc =
5305 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5306 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5307 return false;
5308 }
5309 }
5310}
5311
5312bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5313 const MCExpr *Expr = nullptr;
5314 SMLoc L = getLoc();
5315 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5316 return true;
5317 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5318 if (check(!Value, L, "expected constant expression"))
5319 return true;
5320 Out = Value->getValue();
5321 return false;
5322}
5323
5324bool AArch64AsmParser::parseComma() {
5325 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5326 return true;
5327 // Eat the comma
5328 Lex();
5329 return false;
5330}
5331
5332bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5333 unsigned First, unsigned Last) {
5334 MCRegister Reg;
5335 SMLoc Start, End;
5336 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5337 return true;
5338
5339 // Special handling for FP and LR; they aren't linearly after x28 in
5340 // the registers enum.
5341 unsigned RangeEnd = Last;
5342 if (Base == AArch64::X0) {
5343 if (Last == AArch64::FP) {
5344 RangeEnd = AArch64::X28;
5345 if (Reg == AArch64::FP) {
5346 Out = 29;
5347 return false;
5348 }
5349 }
5350 if (Last == AArch64::LR) {
5351 RangeEnd = AArch64::X28;
5352 if (Reg == AArch64::FP) {
5353 Out = 29;
5354 return false;
5355 } else if (Reg == AArch64::LR) {
5356 Out = 30;
5357 return false;
5358 }
5359 }
5360 }
5361
5362 if (check(Reg < First || Reg > RangeEnd, Start,
5363 Twine("expected register in range ") +
5366 return true;
5367 Out = Reg - Base;
5368 return false;
5369}
5370
5371bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5372 const MCParsedAsmOperand &Op2) const {
5373 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5374 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5375
5376 if (AOp1.isVectorList() && AOp2.isVectorList())
5377 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5378 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5379 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5380
5381 if (!AOp1.isReg() || !AOp2.isReg())
5382 return false;
5383
5384 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5385 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5386 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5387
5388 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5389 "Testing equality of non-scalar registers not supported");
5390
5391 // Check if a registers match their sub/super register classes.
5392 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5393 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5394 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5395 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5396 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5397 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5398 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5399 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5400
5401 return false;
5402}
5403
5404/// Parse an AArch64 instruction mnemonic followed by its operands.
5405bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5406 StringRef Name, SMLoc NameLoc,
5407 OperandVector &Operands) {
5408 Name = StringSwitch<StringRef>(Name.lower())
5409 .Case("beq", "b.eq")
5410 .Case("bne", "b.ne")
5411 .Case("bhs", "b.hs")
5412 .Case("bcs", "b.cs")
5413 .Case("blo", "b.lo")
5414 .Case("bcc", "b.cc")
5415 .Case("bmi", "b.mi")
5416 .Case("bpl", "b.pl")
5417 .Case("bvs", "b.vs")
5418 .Case("bvc", "b.vc")
5419 .Case("bhi", "b.hi")
5420 .Case("bls", "b.ls")
5421 .Case("bge", "b.ge")
5422 .Case("blt", "b.lt")
5423 .Case("bgt", "b.gt")
5424 .Case("ble", "b.le")
5425 .Case("bal", "b.al")
5426 .Case("bnv", "b.nv")
5427 .Default(Name);
5428
5429 // First check for the AArch64-specific .req directive.
5430 if (getTok().is(AsmToken::Identifier) &&
5431 getTok().getIdentifier().lower() == ".req") {
5432 parseDirectiveReq(Name, NameLoc);
5433 // We always return 'error' for this, as we're done with this
5434 // statement and don't need to match the 'instruction."
5435 return true;
5436 }
5437
5438 // Create the leading tokens for the mnemonic, split by '.' characters.
5439 size_t Start = 0, Next = Name.find('.');
5440 StringRef Head = Name.slice(Start, Next);
5441
5442 // IC, DC, AT, TLBI, MLBI, GIC{R}, GSB and Prediction invalidation
5443 // instructions are aliases for the SYS instruction.
5444 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5445 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
5446 Head == "mlbi" || Head == "gic" || Head == "gsb")
5447 return parseSysAlias(Head, NameLoc, Operands);
5448
5449 // GICR instructions are aliases for the SYSL instruction.
5450 if (Head == "gicr")
5451 return parseSyslAlias(Head, NameLoc, Operands);
5452
5453 // TLBIP instructions are aliases for the SYSP instruction.
5454 if (Head == "tlbip")
5455 return parseSyspAlias(Head, NameLoc, Operands);
5456
5457 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5458 Mnemonic = Head;
5459
5460 // Handle condition codes for a branch mnemonic
5461 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5462 Start = Next;
5463 Next = Name.find('.', Start + 1);
5464 Head = Name.slice(Start + 1, Next);
5465
5466 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5467 (Head.data() - Name.data()));
5468 std::string Suggestion;
5469 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5470 if (CC == AArch64CC::Invalid) {
5471 std::string Msg = "invalid condition code";
5472 if (!Suggestion.empty())
5473 Msg += ", did you mean " + Suggestion + "?";
5474 return Error(SuffixLoc, Msg);
5475 }
5476 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5477 /*IsSuffix=*/true));
5478 Operands.push_back(
5479 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5480 }
5481
5482 // Add the remaining tokens in the mnemonic.
5483 while (Next != StringRef::npos) {
5484 Start = Next;
5485 Next = Name.find('.', Start + 1);
5486 Head = Name.slice(Start, Next);
5487 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5488 (Head.data() - Name.data()) + 1);
5489 Operands.push_back(AArch64Operand::CreateToken(
5490 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5491 }
5492
5493 // Conditional compare instructions have a Condition Code operand, which needs
5494 // to be parsed and an immediate operand created.
5495 bool condCodeFourthOperand =
5496 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5497 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5498 Head == "csinc" || Head == "csinv" || Head == "csneg");
5499
5500 // These instructions are aliases to some of the conditional select
5501 // instructions. However, the condition code is inverted in the aliased
5502 // instruction.
5503 //
5504 // FIXME: Is this the correct way to handle these? Or should the parser
5505 // generate the aliased instructions directly?
5506 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5507 bool condCodeThirdOperand =
5508 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5509
5510 // Read the remaining operands.
5511 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5512
5513 unsigned N = 1;
5514 do {
5515 // Parse and remember the operand.
5516 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5517 (N == 3 && condCodeThirdOperand) ||
5518 (N == 2 && condCodeSecondOperand),
5519 condCodeSecondOperand || condCodeThirdOperand)) {
5520 return true;
5521 }
5522
5523 // After successfully parsing some operands there are three special cases
5524 // to consider (i.e. notional operands not separated by commas). Two are
5525 // due to memory specifiers:
5526 // + An RBrac will end an address for load/store/prefetch
5527 // + An '!' will indicate a pre-indexed operation.
5528 //
5529 // And a further case is '}', which ends a group of tokens specifying the
5530 // SME accumulator array 'ZA' or tile vector, i.e.
5531 //
5532 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5533 //
5534 // It's someone else's responsibility to make sure these tokens are sane
5535 // in the given context!
5536
5537 if (parseOptionalToken(AsmToken::RBrac))
5538 Operands.push_back(
5539 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5540 if (parseOptionalToken(AsmToken::Exclaim))
5541 Operands.push_back(
5542 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5543 if (parseOptionalToken(AsmToken::RCurly))
5544 Operands.push_back(
5545 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5546
5547 ++N;
5548 } while (parseOptionalToken(AsmToken::Comma));
5549 }
5550
5551 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5552 return true;
5553
5554 return false;
5555}
5556
5557static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5558 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5559 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5560 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5561 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5562 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5563 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5564 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5565}
5566
5567// FIXME: This entire function is a giant hack to provide us with decent
5568// operand range validation/diagnostics until TableGen/MC can be extended
5569// to support autogeneration of this kind of validation.
5570bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5571 SmallVectorImpl<SMLoc> &Loc) {
5572 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5573 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5574
5575 // A prefix only applies to the instruction following it. Here we extract
5576 // prefix information for the next instruction before validating the current
5577 // one so that in the case of failure we don't erroneously continue using the
5578 // current prefix.
5579 PrefixInfo Prefix = NextPrefix;
5580 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5581
5582 // Before validating the instruction in isolation we run through the rules
5583 // applicable when it follows a prefix instruction.
5584 // NOTE: brk & hlt can be prefixed but require no additional validation.
5585 if (Prefix.isActive() &&
5586 (Inst.getOpcode() != AArch64::BRK) &&
5587 (Inst.getOpcode() != AArch64::HLT)) {
5588
5589 // Prefixed instructions must have a destructive operand.
5592 return Error(IDLoc, "instruction is unpredictable when following a"
5593 " movprfx, suggest replacing movprfx with mov");
5594
5595 // Destination operands must match.
5596 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5597 return Error(Loc[0], "instruction is unpredictable when following a"
5598 " movprfx writing to a different destination");
5599
5600 // Destination operand must not be used in any other location.
5601 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5602 if (Inst.getOperand(i).isReg() &&
5603 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5604 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5605 return Error(Loc[0], "instruction is unpredictable when following a"
5606 " movprfx and destination also used as non-destructive"
5607 " source");
5608 }
5609
5610 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5611 if (Prefix.isPredicated()) {
5612 int PgIdx = -1;
5613
5614 // Find the instructions general predicate.
5615 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5616 if (Inst.getOperand(i).isReg() &&
5617 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5618 PgIdx = i;
5619 break;
5620 }
5621
5622 // Instruction must be predicated if the movprfx is predicated.
5623 if (PgIdx == -1 ||
5625 return Error(IDLoc, "instruction is unpredictable when following a"
5626 " predicated movprfx, suggest using unpredicated movprfx");
5627
5628 // Instruction must use same general predicate as the movprfx.
5629 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5630 return Error(IDLoc, "instruction is unpredictable when following a"
5631 " predicated movprfx using a different general predicate");
5632
5633 // Instruction element type must match the movprfx.
5634 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5635 return Error(IDLoc, "instruction is unpredictable when following a"
5636 " predicated movprfx with a different element size");
5637 }
5638 }
5639
5640 // On ARM64EC, only valid registers may be used. Warn against using
5641 // explicitly disallowed registers.
5642 if (IsWindowsArm64EC) {
5643 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5644 if (Inst.getOperand(i).isReg()) {
5645 MCRegister Reg = Inst.getOperand(i).getReg();
5646 // At this point, vector registers are matched to their
5647 // appropriately sized alias.
5648 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5649 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5650 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5651 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5652 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5653 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5654 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5655 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5656 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5657 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5658 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5659 " is disallowed on ARM64EC.");
5660 }
5661 }
5662 }
5663 }
5664
5665 // Check for indexed addressing modes w/ the base register being the
5666 // same as a destination/source register or pair load where
5667 // the Rt == Rt2. All of those are undefined behaviour.
5668 switch (Inst.getOpcode()) {
5669 case AArch64::LDPSWpre:
5670 case AArch64::LDPWpost:
5671 case AArch64::LDPWpre:
5672 case AArch64::LDPXpost:
5673 case AArch64::LDPXpre: {
5674 MCRegister Rt = Inst.getOperand(1).getReg();
5675 MCRegister Rt2 = Inst.getOperand(2).getReg();
5676 MCRegister Rn = Inst.getOperand(3).getReg();
5677 if (RI->isSubRegisterEq(Rn, Rt))
5678 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5679 "is also a destination");
5680 if (RI->isSubRegisterEq(Rn, Rt2))
5681 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5682 "is also a destination");
5683 [[fallthrough]];
5684 }
5685 case AArch64::LDR_ZA:
5686 case AArch64::STR_ZA: {
5687 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5688 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5689 return Error(Loc[1],
5690 "unpredictable instruction, immediate and offset mismatch.");
5691 break;
5692 }
5693 case AArch64::LDPDi:
5694 case AArch64::LDPQi:
5695 case AArch64::LDPSi:
5696 case AArch64::LDPSWi:
5697 case AArch64::LDPWi:
5698 case AArch64::LDPXi: {
5699 MCRegister Rt = Inst.getOperand(0).getReg();
5700 MCRegister Rt2 = Inst.getOperand(1).getReg();
5701 if (Rt == Rt2)
5702 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5703 break;
5704 }
5705 case AArch64::LDPDpost:
5706 case AArch64::LDPDpre:
5707 case AArch64::LDPQpost:
5708 case AArch64::LDPQpre:
5709 case AArch64::LDPSpost:
5710 case AArch64::LDPSpre:
5711 case AArch64::LDPSWpost: {
5712 MCRegister Rt = Inst.getOperand(1).getReg();
5713 MCRegister Rt2 = Inst.getOperand(2).getReg();
5714 if (Rt == Rt2)
5715 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5716 break;
5717 }
5718 case AArch64::STPDpost:
5719 case AArch64::STPDpre:
5720 case AArch64::STPQpost:
5721 case AArch64::STPQpre:
5722 case AArch64::STPSpost:
5723 case AArch64::STPSpre:
5724 case AArch64::STPWpost:
5725 case AArch64::STPWpre:
5726 case AArch64::STPXpost:
5727 case AArch64::STPXpre: {
5728 MCRegister Rt = Inst.getOperand(1).getReg();
5729 MCRegister Rt2 = Inst.getOperand(2).getReg();
5730 MCRegister Rn = Inst.getOperand(3).getReg();
5731 if (RI->isSubRegisterEq(Rn, Rt))
5732 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5733 "is also a source");
5734 if (RI->isSubRegisterEq(Rn, Rt2))
5735 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5736 "is also a source");
5737 break;
5738 }
5739 case AArch64::LDRBBpre:
5740 case AArch64::LDRBpre:
5741 case AArch64::LDRHHpre:
5742 case AArch64::LDRHpre:
5743 case AArch64::LDRSBWpre:
5744 case AArch64::LDRSBXpre:
5745 case AArch64::LDRSHWpre:
5746 case AArch64::LDRSHXpre:
5747 case AArch64::LDRSWpre:
5748 case AArch64::LDRWpre:
5749 case AArch64::LDRXpre:
5750 case AArch64::LDRBBpost:
5751 case AArch64::LDRBpost:
5752 case AArch64::LDRHHpost:
5753 case AArch64::LDRHpost:
5754 case AArch64::LDRSBWpost:
5755 case AArch64::LDRSBXpost:
5756 case AArch64::LDRSHWpost:
5757 case AArch64::LDRSHXpost:
5758 case AArch64::LDRSWpost:
5759 case AArch64::LDRWpost:
5760 case AArch64::LDRXpost: {
5761 MCRegister Rt = Inst.getOperand(1).getReg();
5762 MCRegister Rn = Inst.getOperand(2).getReg();
5763 if (RI->isSubRegisterEq(Rn, Rt))
5764 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5765 "is also a source");
5766 break;
5767 }
5768 case AArch64::STRBBpost:
5769 case AArch64::STRBpost:
5770 case AArch64::STRHHpost:
5771 case AArch64::STRHpost:
5772 case AArch64::STRWpost:
5773 case AArch64::STRXpost:
5774 case AArch64::STRBBpre:
5775 case AArch64::STRBpre:
5776 case AArch64::STRHHpre:
5777 case AArch64::STRHpre:
5778 case AArch64::STRWpre:
5779 case AArch64::STRXpre: {
5780 MCRegister Rt = Inst.getOperand(1).getReg();
5781 MCRegister Rn = Inst.getOperand(2).getReg();
5782 if (RI->isSubRegisterEq(Rn, Rt))
5783 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5784 "is also a source");
5785 break;
5786 }
5787 case AArch64::STXRB:
5788 case AArch64::STXRH:
5789 case AArch64::STXRW:
5790 case AArch64::STXRX:
5791 case AArch64::STLXRB:
5792 case AArch64::STLXRH:
5793 case AArch64::STLXRW:
5794 case AArch64::STLXRX: {
5795 MCRegister Rs = Inst.getOperand(0).getReg();
5796 MCRegister Rt = Inst.getOperand(1).getReg();
5797 MCRegister Rn = Inst.getOperand(2).getReg();
5798 if (RI->isSubRegisterEq(Rt, Rs) ||
5799 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5800 return Error(Loc[0],
5801 "unpredictable STXR instruction, status is also a source");
5802 break;
5803 }
5804 case AArch64::STXPW:
5805 case AArch64::STXPX:
5806 case AArch64::STLXPW:
5807 case AArch64::STLXPX: {
5808 MCRegister Rs = Inst.getOperand(0).getReg();
5809 MCRegister Rt1 = Inst.getOperand(1).getReg();
5810 MCRegister Rt2 = Inst.getOperand(2).getReg();
5811 MCRegister Rn = Inst.getOperand(3).getReg();
5812 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5813 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5814 return Error(Loc[0],
5815 "unpredictable STXP instruction, status is also a source");
5816 break;
5817 }
5818 case AArch64::LDRABwriteback:
5819 case AArch64::LDRAAwriteback: {
5820 MCRegister Xt = Inst.getOperand(0).getReg();
5821 MCRegister Xn = Inst.getOperand(1).getReg();
5822 if (Xt == Xn)
5823 return Error(Loc[0],
5824 "unpredictable LDRA instruction, writeback base"
5825 " is also a destination");
5826 break;
5827 }
5828 }
5829
5830 // Check v8.8-A memops instructions.
5831 switch (Inst.getOpcode()) {
5832 case AArch64::CPYFP:
5833 case AArch64::CPYFPWN:
5834 case AArch64::CPYFPRN:
5835 case AArch64::CPYFPN:
5836 case AArch64::CPYFPWT:
5837 case AArch64::CPYFPWTWN:
5838 case AArch64::CPYFPWTRN:
5839 case AArch64::CPYFPWTN:
5840 case AArch64::CPYFPRT:
5841 case AArch64::CPYFPRTWN:
5842 case AArch64::CPYFPRTRN:
5843 case AArch64::CPYFPRTN:
5844 case AArch64::CPYFPT:
5845 case AArch64::CPYFPTWN:
5846 case AArch64::CPYFPTRN:
5847 case AArch64::CPYFPTN:
5848 case AArch64::CPYFM:
5849 case AArch64::CPYFMWN:
5850 case AArch64::CPYFMRN:
5851 case AArch64::CPYFMN:
5852 case AArch64::CPYFMWT:
5853 case AArch64::CPYFMWTWN:
5854 case AArch64::CPYFMWTRN:
5855 case AArch64::CPYFMWTN:
5856 case AArch64::CPYFMRT:
5857 case AArch64::CPYFMRTWN:
5858 case AArch64::CPYFMRTRN:
5859 case AArch64::CPYFMRTN:
5860 case AArch64::CPYFMT:
5861 case AArch64::CPYFMTWN:
5862 case AArch64::CPYFMTRN:
5863 case AArch64::CPYFMTN:
5864 case AArch64::CPYFE:
5865 case AArch64::CPYFEWN:
5866 case AArch64::CPYFERN:
5867 case AArch64::CPYFEN:
5868 case AArch64::CPYFEWT:
5869 case AArch64::CPYFEWTWN:
5870 case AArch64::CPYFEWTRN:
5871 case AArch64::CPYFEWTN:
5872 case AArch64::CPYFERT:
5873 case AArch64::CPYFERTWN:
5874 case AArch64::CPYFERTRN:
5875 case AArch64::CPYFERTN:
5876 case AArch64::CPYFET:
5877 case AArch64::CPYFETWN:
5878 case AArch64::CPYFETRN:
5879 case AArch64::CPYFETN:
5880 case AArch64::CPYP:
5881 case AArch64::CPYPWN:
5882 case AArch64::CPYPRN:
5883 case AArch64::CPYPN:
5884 case AArch64::CPYPWT:
5885 case AArch64::CPYPWTWN:
5886 case AArch64::CPYPWTRN:
5887 case AArch64::CPYPWTN:
5888 case AArch64::CPYPRT:
5889 case AArch64::CPYPRTWN:
5890 case AArch64::CPYPRTRN:
5891 case AArch64::CPYPRTN:
5892 case AArch64::CPYPT:
5893 case AArch64::CPYPTWN:
5894 case AArch64::CPYPTRN:
5895 case AArch64::CPYPTN:
5896 case AArch64::CPYM:
5897 case AArch64::CPYMWN:
5898 case AArch64::CPYMRN:
5899 case AArch64::CPYMN:
5900 case AArch64::CPYMWT:
5901 case AArch64::CPYMWTWN:
5902 case AArch64::CPYMWTRN:
5903 case AArch64::CPYMWTN:
5904 case AArch64::CPYMRT:
5905 case AArch64::CPYMRTWN:
5906 case AArch64::CPYMRTRN:
5907 case AArch64::CPYMRTN:
5908 case AArch64::CPYMT:
5909 case AArch64::CPYMTWN:
5910 case AArch64::CPYMTRN:
5911 case AArch64::CPYMTN:
5912 case AArch64::CPYE:
5913 case AArch64::CPYEWN:
5914 case AArch64::CPYERN:
5915 case AArch64::CPYEN:
5916 case AArch64::CPYEWT:
5917 case AArch64::CPYEWTWN:
5918 case AArch64::CPYEWTRN:
5919 case AArch64::CPYEWTN:
5920 case AArch64::CPYERT:
5921 case AArch64::CPYERTWN:
5922 case AArch64::CPYERTRN:
5923 case AArch64::CPYERTN:
5924 case AArch64::CPYET:
5925 case AArch64::CPYETWN:
5926 case AArch64::CPYETRN:
5927 case AArch64::CPYETN: {
5928 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5929 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5930 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5931 MCRegister Xd = Inst.getOperand(3).getReg();
5932 MCRegister Xs = Inst.getOperand(4).getReg();
5933 MCRegister Xn = Inst.getOperand(5).getReg();
5934 if (Xd_wb != Xd)
5935 return Error(Loc[0],
5936 "invalid CPY instruction, Xd_wb and Xd do not match");
5937 if (Xs_wb != Xs)
5938 return Error(Loc[0],
5939 "invalid CPY instruction, Xs_wb and Xs do not match");
5940 if (Xn_wb != Xn)
5941 return Error(Loc[0],
5942 "invalid CPY instruction, Xn_wb and Xn do not match");
5943 if (Xd == Xs)
5944 return Error(Loc[0], "invalid CPY instruction, destination and source"
5945 " registers are the same");
5946 if (Xd == Xn)
5947 return Error(Loc[0], "invalid CPY instruction, destination and size"
5948 " registers are the same");
5949 if (Xs == Xn)
5950 return Error(Loc[0], "invalid CPY instruction, source and size"
5951 " registers are the same");
5952 break;
5953 }
5954 case AArch64::SETP:
5955 case AArch64::SETPT:
5956 case AArch64::SETPN:
5957 case AArch64::SETPTN:
5958 case AArch64::SETM:
5959 case AArch64::SETMT:
5960 case AArch64::SETMN:
5961 case AArch64::SETMTN:
5962 case AArch64::SETE:
5963 case AArch64::SETET:
5964 case AArch64::SETEN:
5965 case AArch64::SETETN:
5966 case AArch64::SETGP:
5967 case AArch64::SETGPT:
5968 case AArch64::SETGPN:
5969 case AArch64::SETGPTN:
5970 case AArch64::SETGM:
5971 case AArch64::SETGMT:
5972 case AArch64::SETGMN:
5973 case AArch64::SETGMTN:
5974 case AArch64::MOPSSETGE:
5975 case AArch64::MOPSSETGET:
5976 case AArch64::MOPSSETGEN:
5977 case AArch64::MOPSSETGETN: {
5978 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5979 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5980 MCRegister Xd = Inst.getOperand(2).getReg();
5981 MCRegister Xn = Inst.getOperand(3).getReg();
5982 MCRegister Xm = Inst.getOperand(4).getReg();
5983 if (Xd_wb != Xd)
5984 return Error(Loc[0],
5985 "invalid SET instruction, Xd_wb and Xd do not match");
5986 if (Xn_wb != Xn)
5987 return Error(Loc[0],
5988 "invalid SET instruction, Xn_wb and Xn do not match");
5989 if (Xd == Xn)
5990 return Error(Loc[0], "invalid SET instruction, destination and size"
5991 " registers are the same");
5992 if (Xd == Xm)
5993 return Error(Loc[0], "invalid SET instruction, destination and source"
5994 " registers are the same");
5995 if (Xn == Xm)
5996 return Error(Loc[0], "invalid SET instruction, source and size"
5997 " registers are the same");
5998 break;
5999 }
6000 }
6001
6002 // Now check immediate ranges. Separate from the above as there is overlap
6003 // in the instructions being checked and this keeps the nested conditionals
6004 // to a minimum.
6005 switch (Inst.getOpcode()) {
6006 case AArch64::ADDSWri:
6007 case AArch64::ADDSXri:
6008 case AArch64::ADDWri:
6009 case AArch64::ADDXri:
6010 case AArch64::SUBSWri:
6011 case AArch64::SUBSXri:
6012 case AArch64::SUBWri:
6013 case AArch64::SUBXri: {
6014 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6015 // some slight duplication here.
6016 if (Inst.getOperand(2).isExpr()) {
6017 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6018 AArch64::Specifier ELFSpec;
6019 AArch64::Specifier DarwinSpec;
6020 int64_t Addend;
6021 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6022
6023 // Only allow these with ADDXri.
6024 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6025 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6026 Inst.getOpcode() == AArch64::ADDXri)
6027 return false;
6028
6029 // Only allow these with ADDXri/ADDWri
6037 ELFSpec) &&
6038 (Inst.getOpcode() == AArch64::ADDXri ||
6039 Inst.getOpcode() == AArch64::ADDWri))
6040 return false;
6041
6042 // Don't allow symbol refs in the immediate field otherwise
6043 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6044 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6045 // 'cmp w0, 'borked')
6046 return Error(Loc.back(), "invalid immediate expression");
6047 }
6048 // We don't validate more complex expressions here
6049 }
6050 return false;
6051 }
6052 default:
6053 return false;
6054 }
6055}
6056
6058 const FeatureBitset &FBS,
6059 unsigned VariantID = 0);
6060
6061bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6063 OperandVector &Operands) {
6064 switch (ErrCode) {
6065 case Match_InvalidTiedOperand: {
6066 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6067 if (Op.isVectorList())
6068 return Error(Loc, "operand must match destination register list");
6069
6070 assert(Op.isReg() && "Unexpected operand type");
6071 switch (Op.getRegEqualityTy()) {
6072 case RegConstraintEqualityTy::EqualsSubReg:
6073 return Error(Loc, "operand must be 64-bit form of destination register");
6074 case RegConstraintEqualityTy::EqualsSuperReg:
6075 return Error(Loc, "operand must be 32-bit form of destination register");
6076 case RegConstraintEqualityTy::EqualsReg:
6077 return Error(Loc, "operand must match destination register");
6078 }
6079 llvm_unreachable("Unknown RegConstraintEqualityTy");
6080 }
6081 case Match_MissingFeature:
6082 return Error(Loc,
6083 "instruction requires a CPU feature not currently enabled");
6084 case Match_InvalidOperand:
6085 return Error(Loc, "invalid operand for instruction");
6086 case Match_InvalidSuffix:
6087 return Error(Loc, "invalid type suffix for instruction");
6088 case Match_InvalidCondCode:
6089 return Error(Loc, "expected AArch64 condition code");
6090 case Match_AddSubRegExtendSmall:
6091 return Error(Loc,
6092 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6093 case Match_AddSubRegExtendLarge:
6094 return Error(Loc,
6095 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6096 case Match_AddSubSecondSource:
6097 return Error(Loc,
6098 "expected compatible register, symbol or integer in range [0, 4095]");
6099 case Match_LogicalSecondSource:
6100 return Error(Loc, "expected compatible register or logical immediate");
6101 case Match_InvalidMovImm32Shift:
6102 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6103 case Match_InvalidMovImm64Shift:
6104 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6105 case Match_AddSubRegShift32:
6106 return Error(Loc,
6107 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6108 case Match_AddSubRegShift64:
6109 return Error(Loc,
6110 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6111 case Match_InvalidFPImm:
6112 return Error(Loc,
6113 "expected compatible register or floating-point constant");
6114 case Match_InvalidMemoryIndexedSImm6:
6115 return Error(Loc, "index must be an integer in range [-32, 31].");
6116 case Match_InvalidMemoryIndexedSImm5:
6117 return Error(Loc, "index must be an integer in range [-16, 15].");
6118 case Match_InvalidMemoryIndexed1SImm4:
6119 return Error(Loc, "index must be an integer in range [-8, 7].");
6120 case Match_InvalidMemoryIndexed2SImm4:
6121 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6122 case Match_InvalidMemoryIndexed3SImm4:
6123 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6124 case Match_InvalidMemoryIndexed4SImm4:
6125 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6126 case Match_InvalidMemoryIndexed16SImm4:
6127 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6128 case Match_InvalidMemoryIndexed32SImm4:
6129 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6130 case Match_InvalidMemoryIndexed1SImm6:
6131 return Error(Loc, "index must be an integer in range [-32, 31].");
6132 case Match_InvalidMemoryIndexedSImm8:
6133 return Error(Loc, "index must be an integer in range [-128, 127].");
6134 case Match_InvalidMemoryIndexedSImm9:
6135 return Error(Loc, "index must be an integer in range [-256, 255].");
6136 case Match_InvalidMemoryIndexed16SImm9:
6137 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6138 case Match_InvalidMemoryIndexed8SImm10:
6139 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6140 case Match_InvalidMemoryIndexed4SImm7:
6141 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6142 case Match_InvalidMemoryIndexed8SImm7:
6143 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6144 case Match_InvalidMemoryIndexed16SImm7:
6145 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6146 case Match_InvalidMemoryIndexed8UImm5:
6147 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6148 case Match_InvalidMemoryIndexed8UImm3:
6149 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6150 case Match_InvalidMemoryIndexed4UImm5:
6151 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6152 case Match_InvalidMemoryIndexed2UImm5:
6153 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6154 case Match_InvalidMemoryIndexed8UImm6:
6155 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6156 case Match_InvalidMemoryIndexed16UImm6:
6157 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6158 case Match_InvalidMemoryIndexed4UImm6:
6159 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6160 case Match_InvalidMemoryIndexed2UImm6:
6161 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6162 case Match_InvalidMemoryIndexed1UImm6:
6163 return Error(Loc, "index must be in range [0, 63].");
6164 case Match_InvalidMemoryWExtend8:
6165 return Error(Loc,
6166 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6167 case Match_InvalidMemoryWExtend16:
6168 return Error(Loc,
6169 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6170 case Match_InvalidMemoryWExtend32:
6171 return Error(Loc,
6172 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6173 case Match_InvalidMemoryWExtend64:
6174 return Error(Loc,
6175 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6176 case Match_InvalidMemoryWExtend128:
6177 return Error(Loc,
6178 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6179 case Match_InvalidMemoryXExtend8:
6180 return Error(Loc,
6181 "expected 'lsl' or 'sxtx' with optional shift of #0");
6182 case Match_InvalidMemoryXExtend16:
6183 return Error(Loc,
6184 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6185 case Match_InvalidMemoryXExtend32:
6186 return Error(Loc,
6187 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6188 case Match_InvalidMemoryXExtend64:
6189 return Error(Loc,
6190 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6191 case Match_InvalidMemoryXExtend128:
6192 return Error(Loc,
6193 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6194 case Match_InvalidMemoryIndexed1:
6195 return Error(Loc, "index must be an integer in range [0, 4095].");
6196 case Match_InvalidMemoryIndexed2:
6197 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6198 case Match_InvalidMemoryIndexed4:
6199 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6200 case Match_InvalidMemoryIndexed8:
6201 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6202 case Match_InvalidMemoryIndexed16:
6203 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6204 case Match_InvalidImm0_0:
6205 return Error(Loc, "immediate must be 0.");
6206 case Match_InvalidImm0_1:
6207 return Error(Loc, "immediate must be an integer in range [0, 1].");
6208 case Match_InvalidImm0_3:
6209 return Error(Loc, "immediate must be an integer in range [0, 3].");
6210 case Match_InvalidImm0_7:
6211 return Error(Loc, "immediate must be an integer in range [0, 7].");
6212 case Match_InvalidImm0_15:
6213 return Error(Loc, "immediate must be an integer in range [0, 15].");
6214 case Match_InvalidImm0_31:
6215 return Error(Loc, "immediate must be an integer in range [0, 31].");
6216 case Match_InvalidImm0_63:
6217 return Error(Loc, "immediate must be an integer in range [0, 63].");
6218 case Match_InvalidImm0_127:
6219 return Error(Loc, "immediate must be an integer in range [0, 127].");
6220 case Match_InvalidImm0_255:
6221 return Error(Loc, "immediate must be an integer in range [0, 255].");
6222 case Match_InvalidImm0_65535:
6223 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6224 case Match_InvalidImm1_8:
6225 return Error(Loc, "immediate must be an integer in range [1, 8].");
6226 case Match_InvalidImm1_16:
6227 return Error(Loc, "immediate must be an integer in range [1, 16].");
6228 case Match_InvalidImm1_32:
6229 return Error(Loc, "immediate must be an integer in range [1, 32].");
6230 case Match_InvalidImm1_64:
6231 return Error(Loc, "immediate must be an integer in range [1, 64].");
6232 case Match_InvalidImmM1_62:
6233 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6234 case Match_InvalidMemoryIndexedRange2UImm0:
6235 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6236 case Match_InvalidMemoryIndexedRange2UImm1:
6237 return Error(Loc, "vector select offset must be an immediate range of the "
6238 "form <immf>:<imml>, where the first "
6239 "immediate is a multiple of 2 in the range [0, 2], and "
6240 "the second immediate is immf + 1.");
6241 case Match_InvalidMemoryIndexedRange2UImm2:
6242 case Match_InvalidMemoryIndexedRange2UImm3:
6243 return Error(
6244 Loc,
6245 "vector select offset must be an immediate range of the form "
6246 "<immf>:<imml>, "
6247 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6248 "[0, 14] "
6249 "depending on the instruction, and the second immediate is immf + 1.");
6250 case Match_InvalidMemoryIndexedRange4UImm0:
6251 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6252 case Match_InvalidMemoryIndexedRange4UImm1:
6253 case Match_InvalidMemoryIndexedRange4UImm2:
6254 return Error(
6255 Loc,
6256 "vector select offset must be an immediate range of the form "
6257 "<immf>:<imml>, "
6258 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6259 "[0, 12] "
6260 "depending on the instruction, and the second immediate is immf + 3.");
6261 case Match_InvalidSVEAddSubImm8:
6262 return Error(Loc, "immediate must be an integer in range [0, 255]"
6263 " with a shift amount of 0");
6264 case Match_InvalidSVEAddSubImm16:
6265 case Match_InvalidSVEAddSubImm32:
6266 case Match_InvalidSVEAddSubImm64:
6267 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6268 "multiple of 256 in range [256, 65280]");
6269 case Match_InvalidSVECpyImm8:
6270 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6271 " with a shift amount of 0");
6272 case Match_InvalidSVECpyImm16:
6273 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6274 "multiple of 256 in range [-32768, 65280]");
6275 case Match_InvalidSVECpyImm32:
6276 case Match_InvalidSVECpyImm64:
6277 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6278 "multiple of 256 in range [-32768, 32512]");
6279 case Match_InvalidIndexRange0_0:
6280 return Error(Loc, "expected lane specifier '[0]'");
6281 case Match_InvalidIndexRange1_1:
6282 return Error(Loc, "expected lane specifier '[1]'");
6283 case Match_InvalidIndexRange0_15:
6284 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6285 case Match_InvalidIndexRange0_7:
6286 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6287 case Match_InvalidIndexRange0_3:
6288 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6289 case Match_InvalidIndexRange0_1:
6290 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6291 case Match_InvalidSVEIndexRange0_63:
6292 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6293 case Match_InvalidSVEIndexRange0_31:
6294 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6295 case Match_InvalidSVEIndexRange0_15:
6296 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6297 case Match_InvalidSVEIndexRange0_7:
6298 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6299 case Match_InvalidSVEIndexRange0_3:
6300 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6301 case Match_InvalidLabel:
6302 return Error(Loc, "expected label or encodable integer pc offset");
6303 case Match_MRS:
6304 return Error(Loc, "expected readable system register");
6305 case Match_MSR:
6306 case Match_InvalidSVCR:
6307 return Error(Loc, "expected writable system register or pstate");
6308 case Match_InvalidComplexRotationEven:
6309 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6310 case Match_InvalidComplexRotationOdd:
6311 return Error(Loc, "complex rotation must be 90 or 270.");
6312 case Match_MnemonicFail: {
6313 std::string Suggestion = AArch64MnemonicSpellCheck(
6314 ((AArch64Operand &)*Operands[0]).getToken(),
6315 ComputeAvailableFeatures(STI->getFeatureBits()));
6316 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6317 }
6318 case Match_InvalidGPR64shifted8:
6319 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6320 case Match_InvalidGPR64shifted16:
6321 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6322 case Match_InvalidGPR64shifted32:
6323 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6324 case Match_InvalidGPR64shifted64:
6325 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6326 case Match_InvalidGPR64shifted128:
6327 return Error(
6328 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6329 case Match_InvalidGPR64NoXZRshifted8:
6330 return Error(Loc, "register must be x0..x30 without shift");
6331 case Match_InvalidGPR64NoXZRshifted16:
6332 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6333 case Match_InvalidGPR64NoXZRshifted32:
6334 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6335 case Match_InvalidGPR64NoXZRshifted64:
6336 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6337 case Match_InvalidGPR64NoXZRshifted128:
6338 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6339 case Match_InvalidZPR32UXTW8:
6340 case Match_InvalidZPR32SXTW8:
6341 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6342 case Match_InvalidZPR32UXTW16:
6343 case Match_InvalidZPR32SXTW16:
6344 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6345 case Match_InvalidZPR32UXTW32:
6346 case Match_InvalidZPR32SXTW32:
6347 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6348 case Match_InvalidZPR32UXTW64:
6349 case Match_InvalidZPR32SXTW64:
6350 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6351 case Match_InvalidZPR64UXTW8:
6352 case Match_InvalidZPR64SXTW8:
6353 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6354 case Match_InvalidZPR64UXTW16:
6355 case Match_InvalidZPR64SXTW16:
6356 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6357 case Match_InvalidZPR64UXTW32:
6358 case Match_InvalidZPR64SXTW32:
6359 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6360 case Match_InvalidZPR64UXTW64:
6361 case Match_InvalidZPR64SXTW64:
6362 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6363 case Match_InvalidZPR32LSL8:
6364 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6365 case Match_InvalidZPR32LSL16:
6366 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6367 case Match_InvalidZPR32LSL32:
6368 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6369 case Match_InvalidZPR32LSL64:
6370 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6371 case Match_InvalidZPR64LSL8:
6372 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6373 case Match_InvalidZPR64LSL16:
6374 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6375 case Match_InvalidZPR64LSL32:
6376 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6377 case Match_InvalidZPR64LSL64:
6378 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6379 case Match_InvalidZPR0:
6380 return Error(Loc, "expected register without element width suffix");
6381 case Match_InvalidZPR8:
6382 case Match_InvalidZPR16:
6383 case Match_InvalidZPR32:
6384 case Match_InvalidZPR64:
6385 case Match_InvalidZPR128:
6386 return Error(Loc, "invalid element width");
6387 case Match_InvalidZPR_3b8:
6388 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6389 case Match_InvalidZPR_3b16:
6390 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6391 case Match_InvalidZPR_3b32:
6392 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6393 case Match_InvalidZPR_4b8:
6394 return Error(Loc,
6395 "Invalid restricted vector register, expected z0.b..z15.b");
6396 case Match_InvalidZPR_4b16:
6397 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6398 case Match_InvalidZPR_4b32:
6399 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6400 case Match_InvalidZPR_4b64:
6401 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6402 case Match_InvalidZPRMul2_Lo8:
6403 return Error(Loc, "Invalid restricted vector register, expected even "
6404 "register in z0.b..z14.b");
6405 case Match_InvalidZPRMul2_Hi8:
6406 return Error(Loc, "Invalid restricted vector register, expected even "
6407 "register in z16.b..z30.b");
6408 case Match_InvalidZPRMul2_Lo16:
6409 return Error(Loc, "Invalid restricted vector register, expected even "
6410 "register in z0.h..z14.h");
6411 case Match_InvalidZPRMul2_Hi16:
6412 return Error(Loc, "Invalid restricted vector register, expected even "
6413 "register in z16.h..z30.h");
6414 case Match_InvalidZPRMul2_Lo32:
6415 return Error(Loc, "Invalid restricted vector register, expected even "
6416 "register in z0.s..z14.s");
6417 case Match_InvalidZPRMul2_Hi32:
6418 return Error(Loc, "Invalid restricted vector register, expected even "
6419 "register in z16.s..z30.s");
6420 case Match_InvalidZPRMul2_Lo64:
6421 return Error(Loc, "Invalid restricted vector register, expected even "
6422 "register in z0.d..z14.d");
6423 case Match_InvalidZPRMul2_Hi64:
6424 return Error(Loc, "Invalid restricted vector register, expected even "
6425 "register in z16.d..z30.d");
6426 case Match_InvalidZPR_K0:
6427 return Error(Loc, "invalid restricted vector register, expected register "
6428 "in z20..z23 or z28..z31");
6429 case Match_InvalidSVEPattern:
6430 return Error(Loc, "invalid predicate pattern");
6431 case Match_InvalidSVEPPRorPNRAnyReg:
6432 case Match_InvalidSVEPPRorPNRBReg:
6433 case Match_InvalidSVEPredicateAnyReg:
6434 case Match_InvalidSVEPredicateBReg:
6435 case Match_InvalidSVEPredicateHReg:
6436 case Match_InvalidSVEPredicateSReg:
6437 case Match_InvalidSVEPredicateDReg:
6438 return Error(Loc, "invalid predicate register.");
6439 case Match_InvalidSVEPredicate3bAnyReg:
6440 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6441 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6442 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6443 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6444 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6445 return Error(Loc, "Invalid predicate register, expected PN in range "
6446 "pn8..pn15 with element suffix.");
6447 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6448 return Error(Loc, "invalid restricted predicate-as-counter register "
6449 "expected pn8..pn15");
6450 case Match_InvalidSVEPNPredicateBReg:
6451 case Match_InvalidSVEPNPredicateHReg:
6452 case Match_InvalidSVEPNPredicateSReg:
6453 case Match_InvalidSVEPNPredicateDReg:
6454 return Error(Loc, "Invalid predicate register, expected PN in range "
6455 "pn0..pn15 with element suffix.");
6456 case Match_InvalidSVEVecLenSpecifier:
6457 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6458 case Match_InvalidSVEPredicateListMul2x8:
6459 case Match_InvalidSVEPredicateListMul2x16:
6460 case Match_InvalidSVEPredicateListMul2x32:
6461 case Match_InvalidSVEPredicateListMul2x64:
6462 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6463 "predicate registers, where the first vector is a multiple of 2 "
6464 "and with correct element type");
6465 case Match_InvalidSVEExactFPImmOperandHalfOne:
6466 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6467 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6468 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6469 case Match_InvalidSVEExactFPImmOperandZeroOne:
6470 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6471 case Match_InvalidMatrixTileVectorH8:
6472 case Match_InvalidMatrixTileVectorV8:
6473 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6474 case Match_InvalidMatrixTileVectorH16:
6475 case Match_InvalidMatrixTileVectorV16:
6476 return Error(Loc,
6477 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6478 case Match_InvalidMatrixTileVectorH32:
6479 case Match_InvalidMatrixTileVectorV32:
6480 return Error(Loc,
6481 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6482 case Match_InvalidMatrixTileVectorH64:
6483 case Match_InvalidMatrixTileVectorV64:
6484 return Error(Loc,
6485 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6486 case Match_InvalidMatrixTileVectorH128:
6487 case Match_InvalidMatrixTileVectorV128:
6488 return Error(Loc,
6489 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6490 case Match_InvalidMatrixTile16:
6491 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6492 case Match_InvalidMatrixTile32:
6493 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6494 case Match_InvalidMatrixTile64:
6495 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6496 case Match_InvalidMatrix:
6497 return Error(Loc, "invalid matrix operand, expected za");
6498 case Match_InvalidMatrix8:
6499 return Error(Loc, "invalid matrix operand, expected suffix .b");
6500 case Match_InvalidMatrix16:
6501 return Error(Loc, "invalid matrix operand, expected suffix .h");
6502 case Match_InvalidMatrix32:
6503 return Error(Loc, "invalid matrix operand, expected suffix .s");
6504 case Match_InvalidMatrix64:
6505 return Error(Loc, "invalid matrix operand, expected suffix .d");
6506 case Match_InvalidMatrixIndexGPR32_12_15:
6507 return Error(Loc, "operand must be a register in range [w12, w15]");
6508 case Match_InvalidMatrixIndexGPR32_8_11:
6509 return Error(Loc, "operand must be a register in range [w8, w11]");
6510 case Match_InvalidSVEVectorList2x8Mul2:
6511 case Match_InvalidSVEVectorList2x16Mul2:
6512 case Match_InvalidSVEVectorList2x32Mul2:
6513 case Match_InvalidSVEVectorList2x64Mul2:
6514 case Match_InvalidSVEVectorList2x128Mul2:
6515 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6516 "SVE vectors, where the first vector is a multiple of 2 "
6517 "and with matching element types");
6518 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6519 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6520 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6521 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6522 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6523 "SVE vectors in the range z0-z14, where the first vector "
6524 "is a multiple of 2 "
6525 "and with matching element types");
6526 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6527 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6528 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6529 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6530 return Error(Loc,
6531 "Invalid vector list, expected list with 2 consecutive "
6532 "SVE vectors in the range z16-z30, where the first vector "
6533 "is a multiple of 2 "
6534 "and with matching element types");
6535 case Match_InvalidSVEVectorList4x8Mul4:
6536 case Match_InvalidSVEVectorList4x16Mul4:
6537 case Match_InvalidSVEVectorList4x32Mul4:
6538 case Match_InvalidSVEVectorList4x64Mul4:
6539 case Match_InvalidSVEVectorList4x128Mul4:
6540 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6541 "SVE vectors, where the first vector is a multiple of 4 "
6542 "and with matching element types");
6543 case Match_InvalidLookupTable:
6544 return Error(Loc, "Invalid lookup table, expected zt0");
6545 case Match_InvalidSVEVectorListStrided2x8:
6546 case Match_InvalidSVEVectorListStrided2x16:
6547 case Match_InvalidSVEVectorListStrided2x32:
6548 case Match_InvalidSVEVectorListStrided2x64:
6549 return Error(
6550 Loc,
6551 "Invalid vector list, expected list with each SVE vector in the list "
6552 "8 registers apart, and the first register in the range [z0, z7] or "
6553 "[z16, z23] and with correct element type");
6554 case Match_InvalidSVEVectorListStrided4x8:
6555 case Match_InvalidSVEVectorListStrided4x16:
6556 case Match_InvalidSVEVectorListStrided4x32:
6557 case Match_InvalidSVEVectorListStrided4x64:
6558 return Error(
6559 Loc,
6560 "Invalid vector list, expected list with each SVE vector in the list "
6561 "4 registers apart, and the first register in the range [z0, z3] or "
6562 "[z16, z19] and with correct element type");
6563 case Match_AddSubLSLImm3ShiftLarge:
6564 return Error(Loc,
6565 "expected 'lsl' with optional integer in range [0, 7]");
6566 default:
6567 llvm_unreachable("unexpected error code!");
6568 }
6569}
6570
6571static const char *getSubtargetFeatureName(uint64_t Val);
6572
6573bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6574 OperandVector &Operands,
6575 MCStreamer &Out,
6577 bool MatchingInlineAsm) {
6578 assert(!Operands.empty() && "Unexpected empty operand list!");
6579 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6580 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6581
6582 StringRef Tok = Op.getToken();
6583 unsigned NumOperands = Operands.size();
6584
6585 if (NumOperands == 4 && Tok == "lsl") {
6586 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6587 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6588 if (Op2.isScalarReg() && Op3.isImm()) {
6589 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6590 if (Op3CE) {
6591 uint64_t Op3Val = Op3CE->getValue();
6592 uint64_t NewOp3Val = 0;
6593 uint64_t NewOp4Val = 0;
6594 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6595 Op2.getReg())) {
6596 NewOp3Val = (32 - Op3Val) & 0x1f;
6597 NewOp4Val = 31 - Op3Val;
6598 } else {
6599 NewOp3Val = (64 - Op3Val) & 0x3f;
6600 NewOp4Val = 63 - Op3Val;
6601 }
6602
6603 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6604 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6605
6606 Operands[0] =
6607 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6608 Operands.push_back(AArch64Operand::CreateImm(
6609 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6610 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6611 Op3.getEndLoc(), getContext());
6612 }
6613 }
6614 } else if (NumOperands == 4 && Tok == "bfc") {
6615 // FIXME: Horrible hack to handle BFC->BFM alias.
6616 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6617 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6618 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6619
6620 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6621 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6622 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6623
6624 if (LSBCE && WidthCE) {
6625 uint64_t LSB = LSBCE->getValue();
6626 uint64_t Width = WidthCE->getValue();
6627
6628 uint64_t RegWidth = 0;
6629 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6630 Op1.getReg()))
6631 RegWidth = 64;
6632 else
6633 RegWidth = 32;
6634
6635 if (LSB >= RegWidth)
6636 return Error(LSBOp.getStartLoc(),
6637 "expected integer in range [0, 31]");
6638 if (Width < 1 || Width > RegWidth)
6639 return Error(WidthOp.getStartLoc(),
6640 "expected integer in range [1, 32]");
6641
6642 uint64_t ImmR = 0;
6643 if (RegWidth == 32)
6644 ImmR = (32 - LSB) & 0x1f;
6645 else
6646 ImmR = (64 - LSB) & 0x3f;
6647
6648 uint64_t ImmS = Width - 1;
6649
6650 if (ImmR != 0 && ImmS >= ImmR)
6651 return Error(WidthOp.getStartLoc(),
6652 "requested insert overflows register");
6653
6654 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6655 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6656 Operands[0] =
6657 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6658 Operands[2] = AArch64Operand::CreateReg(
6659 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6660 SMLoc(), SMLoc(), getContext());
6661 Operands[3] = AArch64Operand::CreateImm(
6662 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6663 Operands.emplace_back(
6664 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6665 WidthOp.getEndLoc(), getContext()));
6666 }
6667 }
6668 } else if (NumOperands == 5) {
6669 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6670 // UBFIZ -> UBFM aliases.
6671 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6672 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6673 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6674 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6675
6676 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6677 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6678 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6679
6680 if (Op3CE && Op4CE) {
6681 uint64_t Op3Val = Op3CE->getValue();
6682 uint64_t Op4Val = Op4CE->getValue();
6683
6684 uint64_t RegWidth = 0;
6685 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6686 Op1.getReg()))
6687 RegWidth = 64;
6688 else
6689 RegWidth = 32;
6690
6691 if (Op3Val >= RegWidth)
6692 return Error(Op3.getStartLoc(),
6693 "expected integer in range [0, 31]");
6694 if (Op4Val < 1 || Op4Val > RegWidth)
6695 return Error(Op4.getStartLoc(),
6696 "expected integer in range [1, 32]");
6697
6698 uint64_t NewOp3Val = 0;
6699 if (RegWidth == 32)
6700 NewOp3Val = (32 - Op3Val) & 0x1f;
6701 else
6702 NewOp3Val = (64 - Op3Val) & 0x3f;
6703
6704 uint64_t NewOp4Val = Op4Val - 1;
6705
6706 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6707 return Error(Op4.getStartLoc(),
6708 "requested insert overflows register");
6709
6710 const MCExpr *NewOp3 =
6711 MCConstantExpr::create(NewOp3Val, getContext());
6712 const MCExpr *NewOp4 =
6713 MCConstantExpr::create(NewOp4Val, getContext());
6714 Operands[3] = AArch64Operand::CreateImm(
6715 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6716 Operands[4] = AArch64Operand::CreateImm(
6717 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6718 if (Tok == "bfi")
6719 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6720 getContext());
6721 else if (Tok == "sbfiz")
6722 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6723 getContext());
6724 else if (Tok == "ubfiz")
6725 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6726 getContext());
6727 else
6728 llvm_unreachable("No valid mnemonic for alias?");
6729 }
6730 }
6731
6732 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6733 // UBFX -> UBFM aliases.
6734 } else if (NumOperands == 5 &&
6735 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6736 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6737 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6738 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6739
6740 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6741 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6742 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6743
6744 if (Op3CE && Op4CE) {
6745 uint64_t Op3Val = Op3CE->getValue();
6746 uint64_t Op4Val = Op4CE->getValue();
6747
6748 uint64_t RegWidth = 0;
6749 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6750 Op1.getReg()))
6751 RegWidth = 64;
6752 else
6753 RegWidth = 32;
6754
6755 if (Op3Val >= RegWidth)
6756 return Error(Op3.getStartLoc(),
6757 "expected integer in range [0, 31]");
6758 if (Op4Val < 1 || Op4Val > RegWidth)
6759 return Error(Op4.getStartLoc(),
6760 "expected integer in range [1, 32]");
6761
6762 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6763
6764 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6765 return Error(Op4.getStartLoc(),
6766 "requested extract overflows register");
6767
6768 const MCExpr *NewOp4 =
6769 MCConstantExpr::create(NewOp4Val, getContext());
6770 Operands[4] = AArch64Operand::CreateImm(
6771 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6772 if (Tok == "bfxil")
6773 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6774 getContext());
6775 else if (Tok == "sbfx")
6776 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6777 getContext());
6778 else if (Tok == "ubfx")
6779 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6780 getContext());
6781 else
6782 llvm_unreachable("No valid mnemonic for alias?");
6783 }
6784 }
6785 }
6786 }
6787
6788 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6789 // instruction for FP registers correctly in some rare circumstances. Convert
6790 // it to a safe instruction and warn (because silently changing someone's
6791 // assembly is rude).
6792 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6793 NumOperands == 4 && Tok == "movi") {
6794 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6795 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6796 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6797 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6798 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6799 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6800 if (Suffix.lower() == ".2d" &&
6801 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6802 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6803 " correctly on this CPU, converting to equivalent movi.16b");
6804 // Switch the suffix to .16b.
6805 unsigned Idx = Op1.isToken() ? 1 : 2;
6806 Operands[Idx] =
6807 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6808 }
6809 }
6810 }
6811
6812 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6813 // InstAlias can't quite handle this since the reg classes aren't
6814 // subclasses.
6815 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6816 // The source register can be Wn here, but the matcher expects a
6817 // GPR64. Twiddle it here if necessary.
6818 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6819 if (Op.isScalarReg()) {
6820 MCRegister Reg = getXRegFromWReg(Op.getReg());
6821 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6822 Op.getStartLoc(), Op.getEndLoc(),
6823 getContext());
6824 }
6825 }
6826 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6827 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6828 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6829 if (Op.isScalarReg() &&
6830 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6831 Op.getReg())) {
6832 // The source register can be Wn here, but the matcher expects a
6833 // GPR64. Twiddle it here if necessary.
6834 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6835 if (Op.isScalarReg()) {
6836 MCRegister Reg = getXRegFromWReg(Op.getReg());
6837 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6838 Op.getStartLoc(),
6839 Op.getEndLoc(), getContext());
6840 }
6841 }
6842 }
6843 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6844 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6845 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6846 if (Op.isScalarReg() &&
6847 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6848 Op.getReg())) {
6849 // The source register can be Wn here, but the matcher expects a
6850 // GPR32. Twiddle it here if necessary.
6851 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6852 if (Op.isScalarReg()) {
6853 MCRegister Reg = getWRegFromXReg(Op.getReg());
6854 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6855 Op.getStartLoc(),
6856 Op.getEndLoc(), getContext());
6857 }
6858 }
6859 }
6860
6861 MCInst Inst;
6862 FeatureBitset MissingFeatures;
6863 // First try to match against the secondary set of tables containing the
6864 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6865 unsigned MatchResult =
6866 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6867 MatchingInlineAsm, 1);
6868
6869 // If that fails, try against the alternate table containing long-form NEON:
6870 // "fadd v0.2s, v1.2s, v2.2s"
6871 if (MatchResult != Match_Success) {
6872 // But first, save the short-form match result: we can use it in case the
6873 // long-form match also fails.
6874 auto ShortFormNEONErrorInfo = ErrorInfo;
6875 auto ShortFormNEONMatchResult = MatchResult;
6876 auto ShortFormNEONMissingFeatures = MissingFeatures;
6877
6878 MatchResult =
6879 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6880 MatchingInlineAsm, 0);
6881
6882 // Now, both matches failed, and the long-form match failed on the mnemonic
6883 // suffix token operand. The short-form match failure is probably more
6884 // relevant: use it instead.
6885 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6886 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6887 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6888 MatchResult = ShortFormNEONMatchResult;
6889 ErrorInfo = ShortFormNEONErrorInfo;
6890 MissingFeatures = ShortFormNEONMissingFeatures;
6891 }
6892 }
6893
6894 switch (MatchResult) {
6895 case Match_Success: {
6896 // Perform range checking and other semantic validations
6897 SmallVector<SMLoc, 8> OperandLocs;
6898 NumOperands = Operands.size();
6899 for (unsigned i = 1; i < NumOperands; ++i)
6900 OperandLocs.push_back(Operands[i]->getStartLoc());
6901 if (validateInstruction(Inst, IDLoc, OperandLocs))
6902 return true;
6903
6904 Inst.setLoc(IDLoc);
6905 Out.emitInstruction(Inst, getSTI());
6906 return false;
6907 }
6908 case Match_MissingFeature: {
6909 assert(MissingFeatures.any() && "Unknown missing feature!");
6910 // Special case the error message for the very common case where only
6911 // a single subtarget feature is missing (neon, e.g.).
6912 std::string Msg = "instruction requires:";
6913 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6914 if (MissingFeatures[i]) {
6915 Msg += " ";
6916 Msg += getSubtargetFeatureName(i);
6917 }
6918 }
6919 return Error(IDLoc, Msg);
6920 }
6921 case Match_MnemonicFail:
6922 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6923 case Match_InvalidOperand: {
6924 SMLoc ErrorLoc = IDLoc;
6925
6926 if (ErrorInfo != ~0ULL) {
6927 if (ErrorInfo >= Operands.size())
6928 return Error(IDLoc, "too few operands for instruction",
6929 SMRange(IDLoc, getTok().getLoc()));
6930
6931 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6932 if (ErrorLoc == SMLoc())
6933 ErrorLoc = IDLoc;
6934 }
6935 // If the match failed on a suffix token operand, tweak the diagnostic
6936 // accordingly.
6937 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6938 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6939 MatchResult = Match_InvalidSuffix;
6940
6941 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6942 }
6943 case Match_InvalidTiedOperand:
6944 case Match_InvalidMemoryIndexed1:
6945 case Match_InvalidMemoryIndexed2:
6946 case Match_InvalidMemoryIndexed4:
6947 case Match_InvalidMemoryIndexed8:
6948 case Match_InvalidMemoryIndexed16:
6949 case Match_InvalidCondCode:
6950 case Match_AddSubLSLImm3ShiftLarge:
6951 case Match_AddSubRegExtendSmall:
6952 case Match_AddSubRegExtendLarge:
6953 case Match_AddSubSecondSource:
6954 case Match_LogicalSecondSource:
6955 case Match_AddSubRegShift32:
6956 case Match_AddSubRegShift64:
6957 case Match_InvalidMovImm32Shift:
6958 case Match_InvalidMovImm64Shift:
6959 case Match_InvalidFPImm:
6960 case Match_InvalidMemoryWExtend8:
6961 case Match_InvalidMemoryWExtend16:
6962 case Match_InvalidMemoryWExtend32:
6963 case Match_InvalidMemoryWExtend64:
6964 case Match_InvalidMemoryWExtend128:
6965 case Match_InvalidMemoryXExtend8:
6966 case Match_InvalidMemoryXExtend16:
6967 case Match_InvalidMemoryXExtend32:
6968 case Match_InvalidMemoryXExtend64:
6969 case Match_InvalidMemoryXExtend128:
6970 case Match_InvalidMemoryIndexed1SImm4:
6971 case Match_InvalidMemoryIndexed2SImm4:
6972 case Match_InvalidMemoryIndexed3SImm4:
6973 case Match_InvalidMemoryIndexed4SImm4:
6974 case Match_InvalidMemoryIndexed1SImm6:
6975 case Match_InvalidMemoryIndexed16SImm4:
6976 case Match_InvalidMemoryIndexed32SImm4:
6977 case Match_InvalidMemoryIndexed4SImm7:
6978 case Match_InvalidMemoryIndexed8SImm7:
6979 case Match_InvalidMemoryIndexed16SImm7:
6980 case Match_InvalidMemoryIndexed8UImm5:
6981 case Match_InvalidMemoryIndexed8UImm3:
6982 case Match_InvalidMemoryIndexed4UImm5:
6983 case Match_InvalidMemoryIndexed2UImm5:
6984 case Match_InvalidMemoryIndexed1UImm6:
6985 case Match_InvalidMemoryIndexed2UImm6:
6986 case Match_InvalidMemoryIndexed4UImm6:
6987 case Match_InvalidMemoryIndexed8UImm6:
6988 case Match_InvalidMemoryIndexed16UImm6:
6989 case Match_InvalidMemoryIndexedSImm6:
6990 case Match_InvalidMemoryIndexedSImm5:
6991 case Match_InvalidMemoryIndexedSImm8:
6992 case Match_InvalidMemoryIndexedSImm9:
6993 case Match_InvalidMemoryIndexed16SImm9:
6994 case Match_InvalidMemoryIndexed8SImm10:
6995 case Match_InvalidImm0_0:
6996 case Match_InvalidImm0_1:
6997 case Match_InvalidImm0_3:
6998 case Match_InvalidImm0_7:
6999 case Match_InvalidImm0_15:
7000 case Match_InvalidImm0_31:
7001 case Match_InvalidImm0_63:
7002 case Match_InvalidImm0_127:
7003 case Match_InvalidImm0_255:
7004 case Match_InvalidImm0_65535:
7005 case Match_InvalidImm1_8:
7006 case Match_InvalidImm1_16:
7007 case Match_InvalidImm1_32:
7008 case Match_InvalidImm1_64:
7009 case Match_InvalidImmM1_62:
7010 case Match_InvalidMemoryIndexedRange2UImm0:
7011 case Match_InvalidMemoryIndexedRange2UImm1:
7012 case Match_InvalidMemoryIndexedRange2UImm2:
7013 case Match_InvalidMemoryIndexedRange2UImm3:
7014 case Match_InvalidMemoryIndexedRange4UImm0:
7015 case Match_InvalidMemoryIndexedRange4UImm1:
7016 case Match_InvalidMemoryIndexedRange4UImm2:
7017 case Match_InvalidSVEAddSubImm8:
7018 case Match_InvalidSVEAddSubImm16:
7019 case Match_InvalidSVEAddSubImm32:
7020 case Match_InvalidSVEAddSubImm64:
7021 case Match_InvalidSVECpyImm8:
7022 case Match_InvalidSVECpyImm16:
7023 case Match_InvalidSVECpyImm32:
7024 case Match_InvalidSVECpyImm64:
7025 case Match_InvalidIndexRange0_0:
7026 case Match_InvalidIndexRange1_1:
7027 case Match_InvalidIndexRange0_15:
7028 case Match_InvalidIndexRange0_7:
7029 case Match_InvalidIndexRange0_3:
7030 case Match_InvalidIndexRange0_1:
7031 case Match_InvalidSVEIndexRange0_63:
7032 case Match_InvalidSVEIndexRange0_31:
7033 case Match_InvalidSVEIndexRange0_15:
7034 case Match_InvalidSVEIndexRange0_7:
7035 case Match_InvalidSVEIndexRange0_3:
7036 case Match_InvalidLabel:
7037 case Match_InvalidComplexRotationEven:
7038 case Match_InvalidComplexRotationOdd:
7039 case Match_InvalidGPR64shifted8:
7040 case Match_InvalidGPR64shifted16:
7041 case Match_InvalidGPR64shifted32:
7042 case Match_InvalidGPR64shifted64:
7043 case Match_InvalidGPR64shifted128:
7044 case Match_InvalidGPR64NoXZRshifted8:
7045 case Match_InvalidGPR64NoXZRshifted16:
7046 case Match_InvalidGPR64NoXZRshifted32:
7047 case Match_InvalidGPR64NoXZRshifted64:
7048 case Match_InvalidGPR64NoXZRshifted128:
7049 case Match_InvalidZPR32UXTW8:
7050 case Match_InvalidZPR32UXTW16:
7051 case Match_InvalidZPR32UXTW32:
7052 case Match_InvalidZPR32UXTW64:
7053 case Match_InvalidZPR32SXTW8:
7054 case Match_InvalidZPR32SXTW16:
7055 case Match_InvalidZPR32SXTW32:
7056 case Match_InvalidZPR32SXTW64:
7057 case Match_InvalidZPR64UXTW8:
7058 case Match_InvalidZPR64SXTW8:
7059 case Match_InvalidZPR64UXTW16:
7060 case Match_InvalidZPR64SXTW16:
7061 case Match_InvalidZPR64UXTW32:
7062 case Match_InvalidZPR64SXTW32:
7063 case Match_InvalidZPR64UXTW64:
7064 case Match_InvalidZPR64SXTW64:
7065 case Match_InvalidZPR32LSL8:
7066 case Match_InvalidZPR32LSL16:
7067 case Match_InvalidZPR32LSL32:
7068 case Match_InvalidZPR32LSL64:
7069 case Match_InvalidZPR64LSL8:
7070 case Match_InvalidZPR64LSL16:
7071 case Match_InvalidZPR64LSL32:
7072 case Match_InvalidZPR64LSL64:
7073 case Match_InvalidZPR0:
7074 case Match_InvalidZPR8:
7075 case Match_InvalidZPR16:
7076 case Match_InvalidZPR32:
7077 case Match_InvalidZPR64:
7078 case Match_InvalidZPR128:
7079 case Match_InvalidZPR_3b8:
7080 case Match_InvalidZPR_3b16:
7081 case Match_InvalidZPR_3b32:
7082 case Match_InvalidZPR_4b8:
7083 case Match_InvalidZPR_4b16:
7084 case Match_InvalidZPR_4b32:
7085 case Match_InvalidZPR_4b64:
7086 case Match_InvalidSVEPPRorPNRAnyReg:
7087 case Match_InvalidSVEPPRorPNRBReg:
7088 case Match_InvalidSVEPredicateAnyReg:
7089 case Match_InvalidSVEPattern:
7090 case Match_InvalidSVEVecLenSpecifier:
7091 case Match_InvalidSVEPredicateBReg:
7092 case Match_InvalidSVEPredicateHReg:
7093 case Match_InvalidSVEPredicateSReg:
7094 case Match_InvalidSVEPredicateDReg:
7095 case Match_InvalidSVEPredicate3bAnyReg:
7096 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7097 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7098 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7099 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7100 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7101 case Match_InvalidSVEPNPredicateBReg:
7102 case Match_InvalidSVEPNPredicateHReg:
7103 case Match_InvalidSVEPNPredicateSReg:
7104 case Match_InvalidSVEPNPredicateDReg:
7105 case Match_InvalidSVEPredicateListMul2x8:
7106 case Match_InvalidSVEPredicateListMul2x16:
7107 case Match_InvalidSVEPredicateListMul2x32:
7108 case Match_InvalidSVEPredicateListMul2x64:
7109 case Match_InvalidSVEExactFPImmOperandHalfOne:
7110 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7111 case Match_InvalidSVEExactFPImmOperandZeroOne:
7112 case Match_InvalidMatrixTile16:
7113 case Match_InvalidMatrixTile32:
7114 case Match_InvalidMatrixTile64:
7115 case Match_InvalidMatrix:
7116 case Match_InvalidMatrix8:
7117 case Match_InvalidMatrix16:
7118 case Match_InvalidMatrix32:
7119 case Match_InvalidMatrix64:
7120 case Match_InvalidMatrixTileVectorH8:
7121 case Match_InvalidMatrixTileVectorH16:
7122 case Match_InvalidMatrixTileVectorH32:
7123 case Match_InvalidMatrixTileVectorH64:
7124 case Match_InvalidMatrixTileVectorH128:
7125 case Match_InvalidMatrixTileVectorV8:
7126 case Match_InvalidMatrixTileVectorV16:
7127 case Match_InvalidMatrixTileVectorV32:
7128 case Match_InvalidMatrixTileVectorV64:
7129 case Match_InvalidMatrixTileVectorV128:
7130 case Match_InvalidSVCR:
7131 case Match_InvalidMatrixIndexGPR32_12_15:
7132 case Match_InvalidMatrixIndexGPR32_8_11:
7133 case Match_InvalidLookupTable:
7134 case Match_InvalidZPRMul2_Lo8:
7135 case Match_InvalidZPRMul2_Hi8:
7136 case Match_InvalidZPRMul2_Lo16:
7137 case Match_InvalidZPRMul2_Hi16:
7138 case Match_InvalidZPRMul2_Lo32:
7139 case Match_InvalidZPRMul2_Hi32:
7140 case Match_InvalidZPRMul2_Lo64:
7141 case Match_InvalidZPRMul2_Hi64:
7142 case Match_InvalidZPR_K0:
7143 case Match_InvalidSVEVectorList2x8Mul2:
7144 case Match_InvalidSVEVectorList2x16Mul2:
7145 case Match_InvalidSVEVectorList2x32Mul2:
7146 case Match_InvalidSVEVectorList2x64Mul2:
7147 case Match_InvalidSVEVectorList2x128Mul2:
7148 case Match_InvalidSVEVectorList4x8Mul4:
7149 case Match_InvalidSVEVectorList4x16Mul4:
7150 case Match_InvalidSVEVectorList4x32Mul4:
7151 case Match_InvalidSVEVectorList4x64Mul4:
7152 case Match_InvalidSVEVectorList4x128Mul4:
7153 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7154 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7155 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7156 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7157 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7158 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7159 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7160 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7161 case Match_InvalidSVEVectorListStrided2x8:
7162 case Match_InvalidSVEVectorListStrided2x16:
7163 case Match_InvalidSVEVectorListStrided2x32:
7164 case Match_InvalidSVEVectorListStrided2x64:
7165 case Match_InvalidSVEVectorListStrided4x8:
7166 case Match_InvalidSVEVectorListStrided4x16:
7167 case Match_InvalidSVEVectorListStrided4x32:
7168 case Match_InvalidSVEVectorListStrided4x64:
7169 case Match_MSR:
7170 case Match_MRS: {
7171 if (ErrorInfo >= Operands.size())
7172 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7173 // Any time we get here, there's nothing fancy to do. Just get the
7174 // operand SMLoc and display the diagnostic.
7175 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7176 if (ErrorLoc == SMLoc())
7177 ErrorLoc = IDLoc;
7178 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7179 }
7180 }
7181
7182 llvm_unreachable("Implement any new match types added!");
7183}
7184
7185/// ParseDirective parses the arm specific directives
7186bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7187 const MCContext::Environment Format = getContext().getObjectFileType();
7188 bool IsMachO = Format == MCContext::IsMachO;
7189 bool IsCOFF = Format == MCContext::IsCOFF;
7190 bool IsELF = Format == MCContext::IsELF;
7191
7192 auto IDVal = DirectiveID.getIdentifier().lower();
7193 SMLoc Loc = DirectiveID.getLoc();
7194 if (IDVal == ".arch")
7195 parseDirectiveArch(Loc);
7196 else if (IDVal == ".cpu")
7197 parseDirectiveCPU(Loc);
7198 else if (IDVal == ".tlsdesccall")
7199 parseDirectiveTLSDescCall(Loc);
7200 else if (IDVal == ".ltorg" || IDVal == ".pool")
7201 parseDirectiveLtorg(Loc);
7202 else if (IDVal == ".unreq")
7203 parseDirectiveUnreq(Loc);
7204 else if (IDVal == ".inst")
7205 parseDirectiveInst(Loc);
7206 else if (IDVal == ".cfi_negate_ra_state")
7207 parseDirectiveCFINegateRAState();
7208 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7209 parseDirectiveCFINegateRAStateWithPC();
7210 else if (IDVal == ".cfi_b_key_frame")
7211 parseDirectiveCFIBKeyFrame();
7212 else if (IDVal == ".cfi_mte_tagged_frame")
7213 parseDirectiveCFIMTETaggedFrame();
7214 else if (IDVal == ".arch_extension")
7215 parseDirectiveArchExtension(Loc);
7216 else if (IDVal == ".variant_pcs")
7217 parseDirectiveVariantPCS(Loc);
7218 else if (IsMachO) {
7219 if (IDVal == MCLOHDirectiveName())
7220 parseDirectiveLOH(IDVal, Loc);
7221 else
7222 return true;
7223 } else if (IsCOFF) {
7224 if (IDVal == ".seh_stackalloc")
7225 parseDirectiveSEHAllocStack(Loc);
7226 else if (IDVal == ".seh_endprologue")
7227 parseDirectiveSEHPrologEnd(Loc);
7228 else if (IDVal == ".seh_save_r19r20_x")
7229 parseDirectiveSEHSaveR19R20X(Loc);
7230 else if (IDVal == ".seh_save_fplr")
7231 parseDirectiveSEHSaveFPLR(Loc);
7232 else if (IDVal == ".seh_save_fplr_x")
7233 parseDirectiveSEHSaveFPLRX(Loc);
7234 else if (IDVal == ".seh_save_reg")
7235 parseDirectiveSEHSaveReg(Loc);
7236 else if (IDVal == ".seh_save_reg_x")
7237 parseDirectiveSEHSaveRegX(Loc);
7238 else if (IDVal == ".seh_save_regp")
7239 parseDirectiveSEHSaveRegP(Loc);
7240 else if (IDVal == ".seh_save_regp_x")
7241 parseDirectiveSEHSaveRegPX(Loc);
7242 else if (IDVal == ".seh_save_lrpair")
7243 parseDirectiveSEHSaveLRPair(Loc);
7244 else if (IDVal == ".seh_save_freg")
7245 parseDirectiveSEHSaveFReg(Loc);
7246 else if (IDVal == ".seh_save_freg_x")
7247 parseDirectiveSEHSaveFRegX(Loc);
7248 else if (IDVal == ".seh_save_fregp")
7249 parseDirectiveSEHSaveFRegP(Loc);
7250 else if (IDVal == ".seh_save_fregp_x")
7251 parseDirectiveSEHSaveFRegPX(Loc);
7252 else if (IDVal == ".seh_set_fp")
7253 parseDirectiveSEHSetFP(Loc);
7254 else if (IDVal == ".seh_add_fp")
7255 parseDirectiveSEHAddFP(Loc);
7256 else if (IDVal == ".seh_nop")
7257 parseDirectiveSEHNop(Loc);
7258 else if (IDVal == ".seh_save_next")
7259 parseDirectiveSEHSaveNext(Loc);
7260 else if (IDVal == ".seh_startepilogue")
7261 parseDirectiveSEHEpilogStart(Loc);
7262 else if (IDVal == ".seh_endepilogue")
7263 parseDirectiveSEHEpilogEnd(Loc);
7264 else if (IDVal == ".seh_trap_frame")
7265 parseDirectiveSEHTrapFrame(Loc);
7266 else if (IDVal == ".seh_pushframe")
7267 parseDirectiveSEHMachineFrame(Loc);
7268 else if (IDVal == ".seh_context")
7269 parseDirectiveSEHContext(Loc);
7270 else if (IDVal == ".seh_ec_context")
7271 parseDirectiveSEHECContext(Loc);
7272 else if (IDVal == ".seh_clear_unwound_to_call")
7273 parseDirectiveSEHClearUnwoundToCall(Loc);
7274 else if (IDVal == ".seh_pac_sign_lr")
7275 parseDirectiveSEHPACSignLR(Loc);
7276 else if (IDVal == ".seh_save_any_reg")
7277 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7278 else if (IDVal == ".seh_save_any_reg_p")
7279 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7280 else if (IDVal == ".seh_save_any_reg_x")
7281 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7282 else if (IDVal == ".seh_save_any_reg_px")
7283 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7284 else if (IDVal == ".seh_allocz")
7285 parseDirectiveSEHAllocZ(Loc);
7286 else if (IDVal == ".seh_save_zreg")
7287 parseDirectiveSEHSaveZReg(Loc);
7288 else if (IDVal == ".seh_save_preg")
7289 parseDirectiveSEHSavePReg(Loc);
7290 else
7291 return true;
7292 } else if (IsELF) {
7293 if (IDVal == ".aeabi_subsection")
7294 parseDirectiveAeabiSubSectionHeader(Loc);
7295 else if (IDVal == ".aeabi_attribute")
7296 parseDirectiveAeabiAArch64Attr(Loc);
7297 else
7298 return true;
7299 } else
7300 return true;
7301 return false;
7302}
7303
7304static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7305 SmallVector<StringRef, 4> &RequestedExtensions) {
7306 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7307 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7308
7309 if (!NoCrypto && Crypto) {
7310 // Map 'generic' (and others) to sha2 and aes, because
7311 // that was the traditional meaning of crypto.
7312 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7313 ArchInfo == AArch64::ARMV8_3A) {
7314 RequestedExtensions.push_back("sha2");
7315 RequestedExtensions.push_back("aes");
7316 }
7317 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7318 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7319 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7320 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7321 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7322 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7323 RequestedExtensions.push_back("sm4");
7324 RequestedExtensions.push_back("sha3");
7325 RequestedExtensions.push_back("sha2");
7326 RequestedExtensions.push_back("aes");
7327 }
7328 } else if (NoCrypto) {
7329 // Map 'generic' (and others) to sha2 and aes, because
7330 // that was the traditional meaning of crypto.
7331 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7332 ArchInfo == AArch64::ARMV8_3A) {
7333 RequestedExtensions.push_back("nosha2");
7334 RequestedExtensions.push_back("noaes");
7335 }
7336 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7337 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7338 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7339 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7340 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7341 ArchInfo == AArch64::ARMV9_4A) {
7342 RequestedExtensions.push_back("nosm4");
7343 RequestedExtensions.push_back("nosha3");
7344 RequestedExtensions.push_back("nosha2");
7345 RequestedExtensions.push_back("noaes");
7346 }
7347 }
7348}
7349
7351 return SMLoc::getFromPointer(L.getPointer() + Offset);
7352}
7353
7354/// parseDirectiveArch
7355/// ::= .arch token
7356bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7357 SMLoc CurLoc = getLoc();
7358
7359 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7360 StringRef Arch, ExtensionString;
7361 std::tie(Arch, ExtensionString) = Name.split('+');
7362
7363 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7364 if (!ArchInfo)
7365 return Error(CurLoc, "unknown arch name");
7366
7367 if (parseToken(AsmToken::EndOfStatement))
7368 return true;
7369
7370 // Get the architecture and extension features.
7371 std::vector<StringRef> AArch64Features;
7372 AArch64Features.push_back(ArchInfo->ArchFeature);
7373 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7374
7375 MCSubtargetInfo &STI = copySTI();
7376 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7377 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7378 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7379
7380 SmallVector<StringRef, 4> RequestedExtensions;
7381 if (!ExtensionString.empty())
7382 ExtensionString.split(RequestedExtensions, '+');
7383
7384 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7385 CurLoc = incrementLoc(CurLoc, Arch.size());
7386
7387 for (auto Name : RequestedExtensions) {
7388 // Advance source location past '+'.
7389 CurLoc = incrementLoc(CurLoc, 1);
7390
7391 bool EnableFeature = !Name.consume_front_insensitive("no");
7392
7393 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7394 return Extension.Name == Name;
7395 });
7396
7397 if (It == std::end(ExtensionMap))
7398 return Error(CurLoc, "unsupported architectural extension: " + Name);
7399
7400 if (EnableFeature)
7401 STI.SetFeatureBitsTransitively(It->Features);
7402 else
7403 STI.ClearFeatureBitsTransitively(It->Features);
7404 CurLoc = incrementLoc(CurLoc, Name.size());
7405 }
7406 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7407 setAvailableFeatures(Features);
7408
7409 getTargetStreamer().emitDirectiveArch(Name);
7410 return false;
7411}
7412
7413/// parseDirectiveArchExtension
7414/// ::= .arch_extension [no]feature
7415bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7416 SMLoc ExtLoc = getLoc();
7417
7418 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7419
7420 if (parseEOL())
7421 return true;
7422
7423 bool EnableFeature = true;
7424 StringRef Name = FullName;
7425 if (Name.starts_with_insensitive("no")) {
7426 EnableFeature = false;
7427 Name = Name.substr(2);
7428 }
7429
7430 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7431 return Extension.Name == Name;
7432 });
7433
7434 if (It == std::end(ExtensionMap))
7435 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7436
7437 MCSubtargetInfo &STI = copySTI();
7438 if (EnableFeature)
7439 STI.SetFeatureBitsTransitively(It->Features);
7440 else
7441 STI.ClearFeatureBitsTransitively(It->Features);
7442 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7443 setAvailableFeatures(Features);
7444
7445 getTargetStreamer().emitDirectiveArchExtension(FullName);
7446 return false;
7447}
7448
7449/// parseDirectiveCPU
7450/// ::= .cpu id
7451bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7452 SMLoc CurLoc = getLoc();
7453
7454 StringRef CPU, ExtensionString;
7455 std::tie(CPU, ExtensionString) =
7456 getParser().parseStringToEndOfStatement().trim().split('+');
7457
7458 if (parseToken(AsmToken::EndOfStatement))
7459 return true;
7460
7461 SmallVector<StringRef, 4> RequestedExtensions;
7462 if (!ExtensionString.empty())
7463 ExtensionString.split(RequestedExtensions, '+');
7464
7465 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7466 if (!CpuArch) {
7467 Error(CurLoc, "unknown CPU name");
7468 return false;
7469 }
7470 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7471
7472 MCSubtargetInfo &STI = copySTI();
7473 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7474 CurLoc = incrementLoc(CurLoc, CPU.size());
7475
7476 for (auto Name : RequestedExtensions) {
7477 // Advance source location past '+'.
7478 CurLoc = incrementLoc(CurLoc, 1);
7479
7480 bool EnableFeature = !Name.consume_front_insensitive("no");
7481
7482 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7483 return Extension.Name == Name;
7484 });
7485
7486 if (It == std::end(ExtensionMap))
7487 return Error(CurLoc, "unsupported architectural extension: " + Name);
7488
7489 if (EnableFeature)
7490 STI.SetFeatureBitsTransitively(It->Features);
7491 else
7492 STI.ClearFeatureBitsTransitively(It->Features);
7493 CurLoc = incrementLoc(CurLoc, Name.size());
7494 }
7495 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7496 setAvailableFeatures(Features);
7497 return false;
7498}
7499
7500/// parseDirectiveInst
7501/// ::= .inst opcode [, ...]
7502bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7503 if (getLexer().is(AsmToken::EndOfStatement))
7504 return Error(Loc, "expected expression following '.inst' directive");
7505
7506 auto parseOp = [&]() -> bool {
7507 SMLoc L = getLoc();
7508 const MCExpr *Expr = nullptr;
7509 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7510 return true;
7511 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7512 if (check(!Value, L, "expected constant expression"))
7513 return true;
7514 getTargetStreamer().emitInst(Value->getValue());
7515 return false;
7516 };
7517
7518 return parseMany(parseOp);
7519}
7520
7521// parseDirectiveTLSDescCall:
7522// ::= .tlsdesccall symbol
7523bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7524 StringRef Name;
7525 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7526 parseToken(AsmToken::EndOfStatement))
7527 return true;
7528
7529 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7530 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7532
7533 MCInst Inst;
7534 Inst.setOpcode(AArch64::TLSDESCCALL);
7536
7537 getParser().getStreamer().emitInstruction(Inst, getSTI());
7538 return false;
7539}
7540
7541/// ::= .loh <lohName | lohId> label1, ..., labelN
7542/// The number of arguments depends on the loh identifier.
7543bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7545 if (getTok().isNot(AsmToken::Identifier)) {
7546 if (getTok().isNot(AsmToken::Integer))
7547 return TokError("expected an identifier or a number in directive");
7548 // We successfully get a numeric value for the identifier.
7549 // Check if it is valid.
7550 int64_t Id = getTok().getIntVal();
7551 if (Id <= -1U && !isValidMCLOHType(Id))
7552 return TokError("invalid numeric identifier in directive");
7553 Kind = (MCLOHType)Id;
7554 } else {
7555 StringRef Name = getTok().getIdentifier();
7556 // We successfully parse an identifier.
7557 // Check if it is a recognized one.
7558 int Id = MCLOHNameToId(Name);
7559
7560 if (Id == -1)
7561 return TokError("invalid identifier in directive");
7562 Kind = (MCLOHType)Id;
7563 }
7564 // Consume the identifier.
7565 Lex();
7566 // Get the number of arguments of this LOH.
7567 int NbArgs = MCLOHIdToNbArgs(Kind);
7568
7569 assert(NbArgs != -1 && "Invalid number of arguments");
7570
7572 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7573 StringRef Name;
7574 if (getParser().parseIdentifier(Name))
7575 return TokError("expected identifier in directive");
7576 Args.push_back(getContext().getOrCreateSymbol(Name));
7577
7578 if (Idx + 1 == NbArgs)
7579 break;
7580 if (parseComma())
7581 return true;
7582 }
7583 if (parseEOL())
7584 return true;
7585
7586 getStreamer().emitLOHDirective(Kind, Args);
7587 return false;
7588}
7589
7590/// parseDirectiveLtorg
7591/// ::= .ltorg | .pool
7592bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7593 if (parseEOL())
7594 return true;
7595 getTargetStreamer().emitCurrentConstantPool();
7596 return false;
7597}
7598
7599/// parseDirectiveReq
7600/// ::= name .req registername
7601bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7602 Lex(); // Eat the '.req' token.
7603 SMLoc SRegLoc = getLoc();
7604 RegKind RegisterKind = RegKind::Scalar;
7605 MCRegister RegNum;
7606 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7607
7608 if (!ParseRes.isSuccess()) {
7609 StringRef Kind;
7610 RegisterKind = RegKind::NeonVector;
7611 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7612
7613 if (ParseRes.isFailure())
7614 return true;
7615
7616 if (ParseRes.isSuccess() && !Kind.empty())
7617 return Error(SRegLoc, "vector register without type specifier expected");
7618 }
7619
7620 if (!ParseRes.isSuccess()) {
7621 StringRef Kind;
7622 RegisterKind = RegKind::SVEDataVector;
7623 ParseRes =
7624 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7625
7626 if (ParseRes.isFailure())
7627 return true;
7628
7629 if (ParseRes.isSuccess() && !Kind.empty())
7630 return Error(SRegLoc,
7631 "sve vector register without type specifier expected");
7632 }
7633
7634 if (!ParseRes.isSuccess()) {
7635 StringRef Kind;
7636 RegisterKind = RegKind::SVEPredicateVector;
7637 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7638
7639 if (ParseRes.isFailure())
7640 return true;
7641
7642 if (ParseRes.isSuccess() && !Kind.empty())
7643 return Error(SRegLoc,
7644 "sve predicate register without type specifier expected");
7645 }
7646
7647 if (!ParseRes.isSuccess())
7648 return Error(SRegLoc, "register name or alias expected");
7649
7650 // Shouldn't be anything else.
7651 if (parseEOL())
7652 return true;
7653
7654 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7655 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7656 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7657
7658 return false;
7659}
7660
7661/// parseDirectiveUneq
7662/// ::= .unreq registername
7663bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7664 if (getTok().isNot(AsmToken::Identifier))
7665 return TokError("unexpected input in .unreq directive.");
7666 RegisterReqs.erase(getTok().getIdentifier().lower());
7667 Lex(); // Eat the identifier.
7668 return parseToken(AsmToken::EndOfStatement);
7669}
7670
7671bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7672 if (parseEOL())
7673 return true;
7674 getStreamer().emitCFINegateRAState();
7675 return false;
7676}
7677
7678bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7679 if (parseEOL())
7680 return true;
7681 getStreamer().emitCFINegateRAStateWithPC();
7682 return false;
7683}
7684
7685/// parseDirectiveCFIBKeyFrame
7686/// ::= .cfi_b_key
7687bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7688 if (parseEOL())
7689 return true;
7690 getStreamer().emitCFIBKeyFrame();
7691 return false;
7692}
7693
7694/// parseDirectiveCFIMTETaggedFrame
7695/// ::= .cfi_mte_tagged_frame
7696bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7697 if (parseEOL())
7698 return true;
7699 getStreamer().emitCFIMTETaggedFrame();
7700 return false;
7701}
7702
7703/// parseDirectiveVariantPCS
7704/// ::= .variant_pcs symbolname
7705bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7706 StringRef Name;
7707 if (getParser().parseIdentifier(Name))
7708 return TokError("expected symbol name");
7709 if (parseEOL())
7710 return true;
7711 getTargetStreamer().emitDirectiveVariantPCS(
7712 getContext().getOrCreateSymbol(Name));
7713 return false;
7714}
7715
7716/// parseDirectiveSEHAllocStack
7717/// ::= .seh_stackalloc
7718bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7719 int64_t Size;
7720 if (parseImmExpr(Size))
7721 return true;
7722 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7723 return false;
7724}
7725
7726/// parseDirectiveSEHPrologEnd
7727/// ::= .seh_endprologue
7728bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7729 getTargetStreamer().emitARM64WinCFIPrologEnd();
7730 return false;
7731}
7732
7733/// parseDirectiveSEHSaveR19R20X
7734/// ::= .seh_save_r19r20_x
7735bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7736 int64_t Offset;
7737 if (parseImmExpr(Offset))
7738 return true;
7739 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7740 return false;
7741}
7742
7743/// parseDirectiveSEHSaveFPLR
7744/// ::= .seh_save_fplr
7745bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7746 int64_t Offset;
7747 if (parseImmExpr(Offset))
7748 return true;
7749 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7750 return false;
7751}
7752
7753/// parseDirectiveSEHSaveFPLRX
7754/// ::= .seh_save_fplr_x
7755bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7756 int64_t Offset;
7757 if (parseImmExpr(Offset))
7758 return true;
7759 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7760 return false;
7761}
7762
7763/// parseDirectiveSEHSaveReg
7764/// ::= .seh_save_reg
7765bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7766 unsigned Reg;
7767 int64_t Offset;
7768 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7769 parseComma() || parseImmExpr(Offset))
7770 return true;
7771 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7772 return false;
7773}
7774
7775/// parseDirectiveSEHSaveRegX
7776/// ::= .seh_save_reg_x
7777bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7778 unsigned Reg;
7779 int64_t Offset;
7780 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7781 parseComma() || parseImmExpr(Offset))
7782 return true;
7783 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7784 return false;
7785}
7786
7787/// parseDirectiveSEHSaveRegP
7788/// ::= .seh_save_regp
7789bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7790 unsigned Reg;
7791 int64_t Offset;
7792 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7793 parseComma() || parseImmExpr(Offset))
7794 return true;
7795 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7796 return false;
7797}
7798
7799/// parseDirectiveSEHSaveRegPX
7800/// ::= .seh_save_regp_x
7801bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7802 unsigned Reg;
7803 int64_t Offset;
7804 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7805 parseComma() || parseImmExpr(Offset))
7806 return true;
7807 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7808 return false;
7809}
7810
7811/// parseDirectiveSEHSaveLRPair
7812/// ::= .seh_save_lrpair
7813bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7814 unsigned Reg;
7815 int64_t Offset;
7816 L = getLoc();
7817 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7818 parseComma() || parseImmExpr(Offset))
7819 return true;
7820 if (check(((Reg - 19) % 2 != 0), L,
7821 "expected register with even offset from x19"))
7822 return true;
7823 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7824 return false;
7825}
7826
7827/// parseDirectiveSEHSaveFReg
7828/// ::= .seh_save_freg
7829bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7830 unsigned Reg;
7831 int64_t Offset;
7832 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7833 parseComma() || parseImmExpr(Offset))
7834 return true;
7835 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7836 return false;
7837}
7838
7839/// parseDirectiveSEHSaveFRegX
7840/// ::= .seh_save_freg_x
7841bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7842 unsigned Reg;
7843 int64_t Offset;
7844 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7845 parseComma() || parseImmExpr(Offset))
7846 return true;
7847 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7848 return false;
7849}
7850
7851/// parseDirectiveSEHSaveFRegP
7852/// ::= .seh_save_fregp
7853bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7854 unsigned Reg;
7855 int64_t Offset;
7856 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7857 parseComma() || parseImmExpr(Offset))
7858 return true;
7859 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7860 return false;
7861}
7862
7863/// parseDirectiveSEHSaveFRegPX
7864/// ::= .seh_save_fregp_x
7865bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7866 unsigned Reg;
7867 int64_t Offset;
7868 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7869 parseComma() || parseImmExpr(Offset))
7870 return true;
7871 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7872 return false;
7873}
7874
7875/// parseDirectiveSEHSetFP
7876/// ::= .seh_set_fp
7877bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7878 getTargetStreamer().emitARM64WinCFISetFP();
7879 return false;
7880}
7881
7882/// parseDirectiveSEHAddFP
7883/// ::= .seh_add_fp
7884bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7885 int64_t Size;
7886 if (parseImmExpr(Size))
7887 return true;
7888 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7889 return false;
7890}
7891
7892/// parseDirectiveSEHNop
7893/// ::= .seh_nop
7894bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7895 getTargetStreamer().emitARM64WinCFINop();
7896 return false;
7897}
7898
7899/// parseDirectiveSEHSaveNext
7900/// ::= .seh_save_next
7901bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7902 getTargetStreamer().emitARM64WinCFISaveNext();
7903 return false;
7904}
7905
7906/// parseDirectiveSEHEpilogStart
7907/// ::= .seh_startepilogue
7908bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7909 getTargetStreamer().emitARM64WinCFIEpilogStart();
7910 return false;
7911}
7912
7913/// parseDirectiveSEHEpilogEnd
7914/// ::= .seh_endepilogue
7915bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7916 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7917 return false;
7918}
7919
7920/// parseDirectiveSEHTrapFrame
7921/// ::= .seh_trap_frame
7922bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7923 getTargetStreamer().emitARM64WinCFITrapFrame();
7924 return false;
7925}
7926
7927/// parseDirectiveSEHMachineFrame
7928/// ::= .seh_pushframe
7929bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7930 getTargetStreamer().emitARM64WinCFIMachineFrame();
7931 return false;
7932}
7933
7934/// parseDirectiveSEHContext
7935/// ::= .seh_context
7936bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7937 getTargetStreamer().emitARM64WinCFIContext();
7938 return false;
7939}
7940
7941/// parseDirectiveSEHECContext
7942/// ::= .seh_ec_context
7943bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7944 getTargetStreamer().emitARM64WinCFIECContext();
7945 return false;
7946}
7947
7948/// parseDirectiveSEHClearUnwoundToCall
7949/// ::= .seh_clear_unwound_to_call
7950bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7951 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7952 return false;
7953}
7954
7955/// parseDirectiveSEHPACSignLR
7956/// ::= .seh_pac_sign_lr
7957bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7958 getTargetStreamer().emitARM64WinCFIPACSignLR();
7959 return false;
7960}
7961
7962/// parseDirectiveSEHSaveAnyReg
7963/// ::= .seh_save_any_reg
7964/// ::= .seh_save_any_reg_p
7965/// ::= .seh_save_any_reg_x
7966/// ::= .seh_save_any_reg_px
7967bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7968 bool Writeback) {
7969 MCRegister Reg;
7970 SMLoc Start, End;
7971 int64_t Offset;
7972 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7973 parseComma() || parseImmExpr(Offset))
7974 return true;
7975
7976 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7977 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7978 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7979 return Error(L, "invalid save_any_reg offset");
7980 unsigned EncodedReg;
7981 if (Reg == AArch64::FP)
7982 EncodedReg = 29;
7983 else if (Reg == AArch64::LR)
7984 EncodedReg = 30;
7985 else
7986 EncodedReg = Reg - AArch64::X0;
7987 if (Paired) {
7988 if (Reg == AArch64::LR)
7989 return Error(Start, "lr cannot be paired with another register");
7990 if (Writeback)
7991 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7992 else
7993 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7994 } else {
7995 if (Writeback)
7996 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7997 else
7998 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7999 }
8000 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
8001 unsigned EncodedReg = Reg - AArch64::D0;
8002 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8003 return Error(L, "invalid save_any_reg offset");
8004 if (Paired) {
8005 if (Reg == AArch64::D31)
8006 return Error(Start, "d31 cannot be paired with another register");
8007 if (Writeback)
8008 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
8009 else
8010 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
8011 } else {
8012 if (Writeback)
8013 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
8014 else
8015 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
8016 }
8017 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
8018 unsigned EncodedReg = Reg - AArch64::Q0;
8019 if (Offset < 0 || Offset % 16)
8020 return Error(L, "invalid save_any_reg offset");
8021 if (Paired) {
8022 if (Reg == AArch64::Q31)
8023 return Error(Start, "q31 cannot be paired with another register");
8024 if (Writeback)
8025 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
8026 else
8027 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
8028 } else {
8029 if (Writeback)
8030 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
8031 else
8032 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
8033 }
8034 } else {
8035 return Error(Start, "save_any_reg register must be x, q or d register");
8036 }
8037 return false;
8038}
8039
8040/// parseDirectiveAllocZ
8041/// ::= .seh_allocz
8042bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8043 int64_t Offset;
8044 if (parseImmExpr(Offset))
8045 return true;
8046 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8047 return false;
8048}
8049
8050/// parseDirectiveSEHSaveZReg
8051/// ::= .seh_save_zreg
8052bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8053 MCRegister RegNum;
8054 StringRef Kind;
8055 int64_t Offset;
8056 ParseStatus Res =
8057 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8058 if (!Res.isSuccess())
8059 return true;
8060 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
8061 "expected register in range z8 to z23"))
8062 return true;
8063 if (parseComma() || parseImmExpr(Offset))
8064 return true;
8065 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
8066 return false;
8067}
8068
8069/// parseDirectiveSEHSavePReg
8070/// ::= .seh_save_preg
8071bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8072 MCRegister RegNum;
8073 StringRef Kind;
8074 int64_t Offset;
8075 ParseStatus Res =
8076 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
8077 if (!Res.isSuccess())
8078 return true;
8079 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
8080 "expected register in range p4 to p15"))
8081 return true;
8082 if (parseComma() || parseImmExpr(Offset))
8083 return true;
8084 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
8085 return false;
8086}
8087
8088bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8089 // Handle parsing of .aeabi_subsection directives
8090 // - On first declaration of a subsection, expect exactly three identifiers
8091 // after `.aeabi_subsection`: the subsection name and two parameters.
8092 // - When switching to an existing subsection, it is valid to provide only
8093 // the subsection name, or the name together with the two parameters.
8094 MCAsmParser &Parser = getParser();
8095
8096 // Consume the name (subsection name)
8097 StringRef SubsectionName;
8098 AArch64BuildAttributes::VendorID SubsectionNameID;
8099 if (Parser.getTok().is(AsmToken::Identifier)) {
8100 SubsectionName = Parser.getTok().getIdentifier();
8101 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
8102 } else {
8103 Error(Parser.getTok().getLoc(), "subsection name not found");
8104 return true;
8105 }
8106 Parser.Lex();
8107
8108 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8109 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
8110 // Check whether only the subsection name was provided.
8111 // If so, the user is trying to switch to a subsection that should have been
8112 // declared before.
8114 if (SubsectionExists) {
8115 getTargetStreamer().emitAttributesSubsection(
8116 SubsectionName,
8118 SubsectionExists->IsOptional),
8120 SubsectionExists->ParameterType));
8121 return false;
8122 }
8123 // If subsection does not exists, report error.
8124 else {
8125 Error(Parser.getTok().getLoc(),
8126 "Could not switch to subsection '" + SubsectionName +
8127 "' using subsection name, subsection has not been defined");
8128 return true;
8129 }
8130 }
8131
8132 // Otherwise, expecting 2 more parameters: consume a comma
8133 // parseComma() return *false* on success, and call Lex(), no need to call
8134 // Lex() again.
8135 if (Parser.parseComma()) {
8136 return true;
8137 }
8138
8139 // Consume the first parameter (optionality parameter)
8141 // options: optional/required
8142 if (Parser.getTok().is(AsmToken::Identifier)) {
8143 StringRef Optionality = Parser.getTok().getIdentifier();
8144 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
8146 Error(Parser.getTok().getLoc(),
8148 return true;
8149 }
8150 if (SubsectionExists) {
8151 if (IsOptional != SubsectionExists->IsOptional) {
8152 Error(Parser.getTok().getLoc(),
8153 "optionality mismatch! subsection '" + SubsectionName +
8154 "' already exists with optionality defined as '" +
8156 SubsectionExists->IsOptional) +
8157 "' and not '" +
8158 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
8159 return true;
8160 }
8161 }
8162 } else {
8163 Error(Parser.getTok().getLoc(),
8164 "optionality parameter not found, expected required|optional");
8165 return true;
8166 }
8167 // Check for possible IsOptional unaccepted values for known subsections
8168 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
8169 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
8170 Error(Parser.getTok().getLoc(),
8171 "aeabi_feature_and_bits must be marked as optional");
8172 return true;
8173 }
8174 }
8175 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8176 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8177 Error(Parser.getTok().getLoc(),
8178 "aeabi_pauthabi must be marked as required");
8179 return true;
8180 }
8181 }
8182 Parser.Lex();
8183 // consume a comma
8184 if (Parser.parseComma()) {
8185 return true;
8186 }
8187
8188 // Consume the second parameter (type parameter)
8190 if (Parser.getTok().is(AsmToken::Identifier)) {
8191 StringRef Name = Parser.getTok().getIdentifier();
8194 Error(Parser.getTok().getLoc(),
8196 return true;
8197 }
8198 if (SubsectionExists) {
8199 if (Type != SubsectionExists->ParameterType) {
8200 Error(Parser.getTok().getLoc(),
8201 "type mismatch! subsection '" + SubsectionName +
8202 "' already exists with type defined as '" +
8204 SubsectionExists->ParameterType) +
8205 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8206 "'");
8207 return true;
8208 }
8209 }
8210 } else {
8211 Error(Parser.getTok().getLoc(),
8212 "type parameter not found, expected uleb128|ntbs");
8213 return true;
8214 }
8215 // Check for possible unaccepted 'type' values for known subsections
8216 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8217 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8219 Error(Parser.getTok().getLoc(),
8220 SubsectionName + " must be marked as ULEB128");
8221 return true;
8222 }
8223 }
8224 Parser.Lex();
8225
8226 // Parsing finished, check for trailing tokens.
8228 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
8229 "attributes subsection header directive");
8230 return true;
8231 }
8232
8233 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
8234
8235 return false;
8236}
8237
8238bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8239 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8240 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8241 // separated by a comma.
8242 MCAsmParser &Parser = getParser();
8243
8244 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8245 getTargetStreamer().getActiveAttributesSubsection();
8246 if (nullptr == ActiveSubsection) {
8247 Error(Parser.getTok().getLoc(),
8248 "no active subsection, build attribute can not be added");
8249 return true;
8250 }
8251 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8252 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8253
8254 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8256 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8257 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8260 ActiveSubsectionName)
8262
8263 StringRef TagStr = "";
8264 unsigned Tag;
8265 if (Parser.getTok().is(AsmToken::Integer)) {
8266 Tag = getTok().getIntVal();
8267 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8268 TagStr = Parser.getTok().getIdentifier();
8269 switch (ActiveSubsectionID) {
8271 // Tag was provided as an unrecognized string instead of an unsigned
8272 // integer
8273 Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
8274 "' \nExcept for public subsections, "
8275 "tags have to be an unsigned int.");
8276 return true;
8277 break;
8281 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8282 TagStr + "' for subsection '" +
8283 ActiveSubsectionName + "'");
8284 return true;
8285 }
8286 break;
8290 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8291 TagStr + "' for subsection '" +
8292 ActiveSubsectionName + "'");
8293 return true;
8294 }
8295 break;
8296 }
8297 } else {
8298 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8299 return true;
8300 }
8301 Parser.Lex();
8302 // consume a comma
8303 // parseComma() return *false* on success, and call Lex(), no need to call
8304 // Lex() again.
8305 if (Parser.parseComma()) {
8306 return true;
8307 }
8308
8309 // Consume the second parameter (attribute value)
8310 unsigned ValueInt = unsigned(-1);
8311 std::string ValueStr = "";
8312 if (Parser.getTok().is(AsmToken::Integer)) {
8313 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8314 Error(
8315 Parser.getTok().getLoc(),
8316 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8317 return true;
8318 }
8319 ValueInt = getTok().getIntVal();
8320 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8321 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8322 Error(
8323 Parser.getTok().getLoc(),
8324 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8325 return true;
8326 }
8327 ValueStr = Parser.getTok().getIdentifier();
8328 } else if (Parser.getTok().is(AsmToken::String)) {
8329 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8330 Error(
8331 Parser.getTok().getLoc(),
8332 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8333 return true;
8334 }
8335 ValueStr = Parser.getTok().getString();
8336 } else {
8337 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8338 return true;
8339 }
8340 // Check for possible unaccepted values for known tags
8341 // (AEABI_FEATURE_AND_BITS)
8342 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8343 if (0 != ValueInt && 1 != ValueInt) {
8344 Error(Parser.getTok().getLoc(),
8345 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8346 "' options are 0|1");
8347 return true;
8348 }
8349 }
8350 Parser.Lex();
8351
8352 // Parsing finished. Check for trailing tokens.
8354 Error(Parser.getTok().getLoc(),
8355 "unexpected token for AArch64 build attributes tag and value "
8356 "attribute directive");
8357 return true;
8358 }
8359
8360 if (unsigned(-1) != ValueInt) {
8361 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
8362 }
8363 if ("" != ValueStr) {
8364 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8365 ValueStr);
8366 }
8367 return false;
8368}
8369
8370bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8371 SMLoc EndLoc;
8372
8373 if (getParser().parseExpression(Res))
8374 return true;
8375 MCAsmParser &Parser = getParser();
8376 if (!parseOptionalToken(AsmToken::At))
8377 return false;
8378 if (getLexer().getKind() != AsmToken::Identifier)
8379 return Error(getLoc(), "expected relocation specifier");
8380
8381 std::string Identifier = Parser.getTok().getIdentifier().lower();
8382 SMLoc Loc = getLoc();
8383 Lex();
8384 if (Identifier == "auth")
8385 return parseAuthExpr(Res, EndLoc);
8386
8387 auto Spec = AArch64::S_None;
8388 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8389 if (Identifier == "got")
8390 Spec = AArch64::S_MACHO_GOT;
8391 } else {
8392 // Unofficial, experimental syntax that will be changed.
8393 if (Identifier == "gotpcrel")
8394 Spec = AArch64::S_GOTPCREL;
8395 else if (Identifier == "plt")
8396 Spec = AArch64::S_PLT;
8397 else if (Identifier == "funcinit")
8398 Spec = AArch64::S_FUNCINIT;
8399 }
8400 if (Spec == AArch64::S_None)
8401 return Error(Loc, "invalid relocation specifier");
8402 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
8403 Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
8404 SRE->getLoc());
8405 else
8406 return Error(Loc, "@ specifier only allowed after a symbol");
8407
8408 for (;;) {
8409 std::optional<MCBinaryExpr::Opcode> Opcode;
8410 if (parseOptionalToken(AsmToken::Plus))
8411 Opcode = MCBinaryExpr::Add;
8412 else if (parseOptionalToken(AsmToken::Minus))
8413 Opcode = MCBinaryExpr::Sub;
8414 else
8415 break;
8416 const MCExpr *Term;
8417 if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
8418 return true;
8419 Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
8420 }
8421 return false;
8422}
8423
8424/// parseAuthExpr
8425/// ::= _sym@AUTH(ib,123[,addr])
8426/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8427/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8428bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8429 MCAsmParser &Parser = getParser();
8430 MCContext &Ctx = getContext();
8431 AsmToken Tok = Parser.getTok();
8432
8433 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8434 if (parseToken(AsmToken::LParen, "expected '('"))
8435 return true;
8436
8437 if (Parser.getTok().isNot(AsmToken::Identifier))
8438 return TokError("expected key name");
8439
8440 StringRef KeyStr = Parser.getTok().getIdentifier();
8441 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8442 if (!KeyIDOrNone)
8443 return TokError("invalid key '" + KeyStr + "'");
8444 Parser.Lex();
8445
8446 if (parseToken(AsmToken::Comma, "expected ','"))
8447 return true;
8448
8449 if (Parser.getTok().isNot(AsmToken::Integer))
8450 return TokError("expected integer discriminator");
8451 int64_t Discriminator = Parser.getTok().getIntVal();
8452
8453 if (!isUInt<16>(Discriminator))
8454 return TokError("integer discriminator " + Twine(Discriminator) +
8455 " out of range [0, 0xFFFF]");
8456 Parser.Lex();
8457
8458 bool UseAddressDiversity = false;
8459 if (Parser.getTok().is(AsmToken::Comma)) {
8460 Parser.Lex();
8461 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8462 Parser.getTok().getIdentifier() != "addr")
8463 return TokError("expected 'addr'");
8464 UseAddressDiversity = true;
8465 Parser.Lex();
8466 }
8467
8468 EndLoc = Parser.getTok().getEndLoc();
8469 if (parseToken(AsmToken::RParen, "expected ')'"))
8470 return true;
8471
8472 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8473 UseAddressDiversity, Ctx, Res->getLoc());
8474 return false;
8475}
8476
8477bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8478 AArch64::Specifier &ELFSpec,
8479 AArch64::Specifier &DarwinSpec,
8480 int64_t &Addend) {
8481 ELFSpec = AArch64::S_INVALID;
8482 DarwinSpec = AArch64::S_None;
8483 Addend = 0;
8484
8485 if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
8486 ELFSpec = AE->getSpecifier();
8487 Expr = AE->getSubExpr();
8488 }
8489
8490 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8491 if (SE) {
8492 // It's a simple symbol reference with no addend.
8493 DarwinSpec = AArch64::Specifier(SE->getKind());
8494 return true;
8495 }
8496
8497 // Check that it looks like a symbol + an addend
8498 MCValue Res;
8499 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
8500 if (!Relocatable || Res.getSubSym())
8501 return false;
8502
8503 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8504 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8505 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8506 return false;
8507
8508 if (Res.getAddSym())
8509 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8510 Addend = Res.getConstant();
8511
8512 // It's some symbol reference + a constant addend, but really
8513 // shouldn't use both Darwin and ELF syntax.
8514 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8515}
8516
8517/// Force static initialization.
8518extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8526
8527#define GET_REGISTER_MATCHER
8528#define GET_SUBTARGET_FEATURE_NAME
8529#define GET_MATCHER_IMPLEMENTATION
8530#define GET_MNEMONIC_SPELL_CHECKER
8531#include "AArch64GenAsmMatcher.inc"
8532
8533// Define this matcher function after the auto-generated include so we
8534// have the match class enum definitions.
8535unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8536 unsigned Kind) {
8537 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8538
8539 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8540 if (!Op.isImm())
8541 return Match_InvalidOperand;
8542 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8543 if (!CE)
8544 return Match_InvalidOperand;
8545 if (CE->getValue() == ExpectedVal)
8546 return Match_Success;
8547 return Match_InvalidOperand;
8548 };
8549
8550 switch (Kind) {
8551 default:
8552 return Match_InvalidOperand;
8553 case MCK_MPR:
8554 // If the Kind is a token for the MPR register class which has the "za"
8555 // register (SME accumulator array), check if the asm is a literal "za"
8556 // token. This is for the "smstart za" alias that defines the register
8557 // as a literal token.
8558 if (Op.isTokenEqual("za"))
8559 return Match_Success;
8560 return Match_InvalidOperand;
8561
8562 // If the kind is a token for a literal immediate, check if our asm operand
8563 // matches. This is for InstAliases which have a fixed-value immediate in
8564 // the asm string, such as hints which are parsed into a specific
8565 // instruction definition.
8566#define MATCH_HASH(N) \
8567 case MCK__HASH_##N: \
8568 return MatchesOpImmediate(N);
8569 MATCH_HASH(0)
8570 MATCH_HASH(1)
8571 MATCH_HASH(2)
8572 MATCH_HASH(3)
8573 MATCH_HASH(4)
8574 MATCH_HASH(6)
8575 MATCH_HASH(7)
8576 MATCH_HASH(8)
8577 MATCH_HASH(10)
8578 MATCH_HASH(12)
8579 MATCH_HASH(14)
8580 MATCH_HASH(16)
8581 MATCH_HASH(24)
8582 MATCH_HASH(25)
8583 MATCH_HASH(26)
8584 MATCH_HASH(27)
8585 MATCH_HASH(28)
8586 MATCH_HASH(29)
8587 MATCH_HASH(30)
8588 MATCH_HASH(31)
8589 MATCH_HASH(32)
8590 MATCH_HASH(40)
8591 MATCH_HASH(48)
8592 MATCH_HASH(64)
8593#undef MATCH_HASH
8594#define MATCH_HASH_MINUS(N) \
8595 case MCK__HASH__MINUS_##N: \
8596 return MatchesOpImmediate(-N);
8600#undef MATCH_HASH_MINUS
8601 }
8602}
8603
8604ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8605
8606 SMLoc S = getLoc();
8607
8608 if (getTok().isNot(AsmToken::Identifier))
8609 return Error(S, "expected register");
8610
8611 MCRegister FirstReg;
8612 ParseStatus Res = tryParseScalarRegister(FirstReg);
8613 if (!Res.isSuccess())
8614 return Error(S, "expected first even register of a consecutive same-size "
8615 "even/odd register pair");
8616
8617 const MCRegisterClass &WRegClass =
8618 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8619 const MCRegisterClass &XRegClass =
8620 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8621
8622 bool isXReg = XRegClass.contains(FirstReg),
8623 isWReg = WRegClass.contains(FirstReg);
8624 if (!isXReg && !isWReg)
8625 return Error(S, "expected first even register of a consecutive same-size "
8626 "even/odd register pair");
8627
8628 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8629 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8630
8631 if (FirstEncoding & 0x1)
8632 return Error(S, "expected first even register of a consecutive same-size "
8633 "even/odd register pair");
8634
8635 if (getTok().isNot(AsmToken::Comma))
8636 return Error(getLoc(), "expected comma");
8637 // Eat the comma
8638 Lex();
8639
8640 SMLoc E = getLoc();
8641 MCRegister SecondReg;
8642 Res = tryParseScalarRegister(SecondReg);
8643 if (!Res.isSuccess())
8644 return Error(E, "expected second odd register of a consecutive same-size "
8645 "even/odd register pair");
8646
8647 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8648 (isXReg && !XRegClass.contains(SecondReg)) ||
8649 (isWReg && !WRegClass.contains(SecondReg)))
8650 return Error(E, "expected second odd register of a consecutive same-size "
8651 "even/odd register pair");
8652
8653 MCRegister Pair;
8654 if (isXReg) {
8655 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8656 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8657 } else {
8658 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8659 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8660 }
8661
8662 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8663 getLoc(), getContext()));
8664
8665 return ParseStatus::Success;
8666}
8667
8668template <bool ParseShiftExtend, bool ParseSuffix>
8669ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8670 const SMLoc S = getLoc();
8671 // Check for a SVE vector register specifier first.
8672 MCRegister RegNum;
8673 StringRef Kind;
8674
8675 ParseStatus Res =
8676 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8677
8678 if (!Res.isSuccess())
8679 return Res;
8680
8681 if (ParseSuffix && Kind.empty())
8682 return ParseStatus::NoMatch;
8683
8684 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8685 if (!KindRes)
8686 return ParseStatus::NoMatch;
8687
8688 unsigned ElementWidth = KindRes->second;
8689
8690 // No shift/extend is the default.
8691 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8692 Operands.push_back(AArch64Operand::CreateVectorReg(
8693 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8694
8695 ParseStatus Res = tryParseVectorIndex(Operands);
8696 if (Res.isFailure())
8697 return ParseStatus::Failure;
8698 return ParseStatus::Success;
8699 }
8700
8701 // Eat the comma
8702 Lex();
8703
8704 // Match the shift
8706 Res = tryParseOptionalShiftExtend(ExtOpnd);
8707 if (!Res.isSuccess())
8708 return Res;
8709
8710 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8711 Operands.push_back(AArch64Operand::CreateVectorReg(
8712 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8713 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8714 Ext->hasShiftExtendAmount()));
8715
8716 return ParseStatus::Success;
8717}
8718
8719ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8720 MCAsmParser &Parser = getParser();
8721
8722 SMLoc SS = getLoc();
8723 const AsmToken &TokE = getTok();
8724 bool IsHash = TokE.is(AsmToken::Hash);
8725
8726 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8727 return ParseStatus::NoMatch;
8728
8729 int64_t Pattern;
8730 if (IsHash) {
8731 Lex(); // Eat hash
8732
8733 // Parse the immediate operand.
8734 const MCExpr *ImmVal;
8735 SS = getLoc();
8736 if (Parser.parseExpression(ImmVal))
8737 return ParseStatus::Failure;
8738
8739 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8740 if (!MCE)
8741 return TokError("invalid operand for instruction");
8742
8743 Pattern = MCE->getValue();
8744 } else {
8745 // Parse the pattern
8746 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8747 if (!Pat)
8748 return ParseStatus::NoMatch;
8749
8750 Lex();
8751 Pattern = Pat->Encoding;
8752 assert(Pattern >= 0 && Pattern < 32);
8753 }
8754
8755 Operands.push_back(
8756 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8757 SS, getLoc(), getContext()));
8758
8759 return ParseStatus::Success;
8760}
8761
8762ParseStatus
8763AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8764 int64_t Pattern;
8765 SMLoc SS = getLoc();
8766 const AsmToken &TokE = getTok();
8767 // Parse the pattern
8768 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8769 TokE.getString());
8770 if (!Pat)
8771 return ParseStatus::NoMatch;
8772
8773 Lex();
8774 Pattern = Pat->Encoding;
8775 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8776
8777 Operands.push_back(
8778 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8779 SS, getLoc(), getContext()));
8780
8781 return ParseStatus::Success;
8782}
8783
8784ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8785 SMLoc SS = getLoc();
8786
8787 MCRegister XReg;
8788 if (!tryParseScalarRegister(XReg).isSuccess())
8789 return ParseStatus::NoMatch;
8790
8791 MCContext &ctx = getContext();
8792 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8793 MCRegister X8Reg = RI->getMatchingSuperReg(
8794 XReg, AArch64::x8sub_0,
8795 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8796 if (!X8Reg)
8797 return Error(SS,
8798 "expected an even-numbered x-register in the range [x0,x22]");
8799
8800 Operands.push_back(
8801 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8802 return ParseStatus::Success;
8803}
8804
8805ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8806 SMLoc S = getLoc();
8807
8808 if (getTok().isNot(AsmToken::Integer))
8809 return ParseStatus::NoMatch;
8810
8811 if (getLexer().peekTok().isNot(AsmToken::Colon))
8812 return ParseStatus::NoMatch;
8813
8814 const MCExpr *ImmF;
8815 if (getParser().parseExpression(ImmF))
8816 return ParseStatus::NoMatch;
8817
8818 if (getTok().isNot(AsmToken::Colon))
8819 return ParseStatus::NoMatch;
8820
8821 Lex(); // Eat ':'
8822 if (getTok().isNot(AsmToken::Integer))
8823 return ParseStatus::NoMatch;
8824
8825 SMLoc E = getTok().getLoc();
8826 const MCExpr *ImmL;
8827 if (getParser().parseExpression(ImmL))
8828 return ParseStatus::NoMatch;
8829
8830 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8831 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8832
8833 Operands.push_back(
8834 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8835 return ParseStatus::Success;
8836}
8837
8838template <int Adj>
8839ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8840 SMLoc S = getLoc();
8841
8842 parseOptionalToken(AsmToken::Hash);
8843 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8844
8845 if (getTok().isNot(AsmToken::Integer))
8846 return ParseStatus::NoMatch;
8847
8848 const MCExpr *Ex;
8849 if (getParser().parseExpression(Ex))
8850 return ParseStatus::NoMatch;
8851
8852 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8853 if (IsNegative)
8854 Imm = -Imm;
8855
8856 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8857 // return a value, which is certain to trigger a error message about invalid
8858 // immediate range instead of a non-descriptive invalid operand error.
8859 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8860 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8861 Imm = -2;
8862 else
8863 Imm += Adj;
8864
8865 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8866 Operands.push_back(AArch64Operand::CreateImm(
8868
8869 return ParseStatus::Success;
8870}
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (elements, element-width) if Suffix is a valid vector kind.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
@ Default
Value * getPointer(Value *Ptr)
static LVOptions Options
Definition LVOptions.cpp:25
Live Register Matrix
loop data Loop Data Prefetch
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Register Reg
#define T
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:472
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
APInt bitcastToAPInt() const
Definition APFloat.h:1335
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition AsmLexer.h:121
void UnLex(AsmToken const &Token)
Definition AsmLexer.h:106
LLVM_ABI SMLoc getLoc() const
Definition AsmLexer.cpp:31
int64_t getIntVal() const
Definition MCAsmMacro.h:108
bool isNot(TokenKind K) const
Definition MCAsmMacro.h:76
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition MCAsmMacro.h:103
bool is(TokenKind K) const
Definition MCAsmMacro.h:75
LLVM_ABI SMLoc getEndLoc() const
Definition AsmLexer.cpp:33
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition MCAsmMacro.h:92
Base class for user error types.
Definition Error.h:354
Container class for subtarget features.
constexpr size_t size() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
AsmLexer & getLexer()
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
static LLVM_ABI const MCBinaryExpr * create(Opcode Op, const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:201
@ Sub
Subtraction.
Definition MCExpr.h:324
@ Add
Addition.
Definition MCExpr.h:302
int64_t getValue() const
Definition MCExpr.h:171
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
const MCRegisterInfo * getRegisterInfo() const
Definition MCContext.h:414
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
SMLoc getLoc() const
Definition MCExpr.h:86
unsigned getNumOperands() const
Definition MCInst.h:212
void setLoc(SMLoc loc)
Definition MCInst.h:207
unsigned getOpcode() const
Definition MCInst.h:202
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isImm() const
Definition MCInst.h:66
bool isReg() const
Definition MCInst.h:65
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
const MCExpr * getExpr() const
Definition MCInst.h:118
bool isExpr() const
Definition MCInst.h:69
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
VariantKind getKind() const
Definition MCExpr.h:232
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
uint32_t getSpecifier() const
Definition MCValue.h:46
const MCSymbol * getSubSym() const
Definition MCValue.h:51
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
constexpr const char * getPointer() const
Definition SMLoc.h:33
void insert_range(Range &&R)
Definition SmallSet.h:195
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:228
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
void erase(iterator I)
Definition StringMap.h:427
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition StringMap.h:321
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition StringRef.h:611
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition StringRef.h:591
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition StringRef.h:816
LLVM_ABI std::string lower() const
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition StringRef.h:172
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:784
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define INT64_MIN
Definition DataTypes.h:74
#define INT64_MAX
Definition DataTypes.h:71
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SubsectionType getTypeID(StringRef Type)
StringRef getVendorName(unsigned const Vendor)
StringRef getOptionalStr(unsigned Optional)
VendorID
AArch64 build attributes vendors IDs (a.k.a subsection name)
SubsectionOptional getOptionalID(StringRef Optional)
FeatureAndBitsTags getFeatureAndBitsTagsID(StringRef FeatureAndBitsTag)
VendorID getVendorID(StringRef const Vendor)
PauthABITags getPauthABITagsID(StringRef PauthABITag)
StringRef getTypeStr(unsigned Type)
static CondCode getInvertedCondCode(CondCode Code)
const PHint * lookupPHintByName(StringRef)
uint32_t parseGenericRegister(StringRef Name)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
LLVM_ABI const ArchInfo * parseArch(StringRef Arch)
LLVM_ABI const ArchInfo * getArchForCpu(StringRef CPU)
LLVM_ABI bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Entry
Definition COFF.h:862
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
constexpr double e
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition Error.h:1113
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
static int MCLOHNameToId(StringRef Name)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
std::string utostr(uint64_t X, bool isNeg=false)
static bool isValidMCLOHType(unsigned Kind)
Op::Description Desc
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:71
static int MCLOHIdToNbArgs(MCLOHType Kind)
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
static MCRegister getXRegFromWReg(MCRegister Reg)
MCLOHType
Linker Optimization Hint Type.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
#define N
const FeatureBitset Features
const char * Name
AArch64::ExtensionBitset DefaultExts
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name
FeatureBitset FeaturesRequired