1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
68enum class RegKind {
69 Scalar,
70 NeonVector,
71 SVEDataVector,
72 SVEPredicateAsCounter,
73 SVEPredicateVector,
74 Matrix,
75 LookupTable
76};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
80enum RegConstraintEqualityTy {
81 EqualsReg,
82 EqualsSuperReg,
83 EqualsSubReg
84};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases defined via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
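 // Illustrative example (not from the original source): after
 //   foo .req x4
 // RegisterReqs maps "foo" -> {RegKind::Scalar, X4}, so a later
 //   mov foo, #1
 // assembles exactly like "mov x4, #1".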
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
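 // Illustrative note: PrefixInfo records an immediately preceding MOVPRFX so
 // that validateInstruction() can check the instruction that consumes it.
 // A minimal SVE sketch:
 //   movprfx z0, z1              // unpredicated: MOVPRFX_ZZ, Dst = Z0
 //   movprfx z0.s, p0/m, z1.s    // predicated:   MOVPRFX_ZPmZ_S, Pg = P0
 // The destructive instruction that follows must write the same Dst (and, when
 // predicated, use a compatible predicate and element size).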
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseDataExpr(const MCExpr *&Res) override;
186 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
187
188 bool parseDirectiveArch(SMLoc L);
189 bool parseDirectiveArchExtension(SMLoc L);
190 bool parseDirectiveCPU(SMLoc L);
191 bool parseDirectiveInst(SMLoc L);
192
193 bool parseDirectiveTLSDescCall(SMLoc L);
194
195 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
196 bool parseDirectiveLtorg(SMLoc L);
197
198 bool parseDirectiveReq(StringRef Name, SMLoc L);
199 bool parseDirectiveUnreq(SMLoc L);
200 bool parseDirectiveCFINegateRAState();
201 bool parseDirectiveCFINegateRAStateWithPC();
202 bool parseDirectiveCFIBKeyFrame();
203 bool parseDirectiveCFIMTETaggedFrame();
204
205 bool parseDirectiveVariantPCS(SMLoc L);
206
207 bool parseDirectiveSEHAllocStack(SMLoc L);
208 bool parseDirectiveSEHPrologEnd(SMLoc L);
209 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
210 bool parseDirectiveSEHSaveFPLR(SMLoc L);
211 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
212 bool parseDirectiveSEHSaveReg(SMLoc L);
213 bool parseDirectiveSEHSaveRegX(SMLoc L);
214 bool parseDirectiveSEHSaveRegP(SMLoc L);
215 bool parseDirectiveSEHSaveRegPX(SMLoc L);
216 bool parseDirectiveSEHSaveLRPair(SMLoc L);
217 bool parseDirectiveSEHSaveFReg(SMLoc L);
218 bool parseDirectiveSEHSaveFRegX(SMLoc L);
219 bool parseDirectiveSEHSaveFRegP(SMLoc L);
220 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
221 bool parseDirectiveSEHSetFP(SMLoc L);
222 bool parseDirectiveSEHAddFP(SMLoc L);
223 bool parseDirectiveSEHNop(SMLoc L);
224 bool parseDirectiveSEHSaveNext(SMLoc L);
225 bool parseDirectiveSEHEpilogStart(SMLoc L);
226 bool parseDirectiveSEHEpilogEnd(SMLoc L);
227 bool parseDirectiveSEHTrapFrame(SMLoc L);
228 bool parseDirectiveSEHMachineFrame(SMLoc L);
229 bool parseDirectiveSEHContext(SMLoc L);
230 bool parseDirectiveSEHECContext(SMLoc L);
231 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
232 bool parseDirectiveSEHPACSignLR(SMLoc L);
233 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
234 bool parseDirectiveSEHAllocZ(SMLoc L);
235 bool parseDirectiveSEHSaveZReg(SMLoc L);
236 bool parseDirectiveSEHSavePReg(SMLoc L);
237 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
238 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
239
240 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
241 SmallVectorImpl<SMLoc> &Loc);
242 unsigned getNumRegsForRegKind(RegKind K);
243 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
244 OperandVector &Operands, MCStreamer &Out,
245 uint64_t &ErrorInfo,
246 bool MatchingInlineAsm) override;
247 /// @name Auto-generated Match Functions
248 /// {
249
250#define GET_ASSEMBLER_HEADER
251#include "AArch64GenAsmMatcher.inc"
252
253 /// }
254
255 ParseStatus tryParseScalarRegister(MCRegister &Reg);
256 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
257 RegKind MatchKind);
258 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
259 ParseStatus tryParseSVCR(OperandVector &Operands);
260 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
261 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
262 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
263 ParseStatus tryParseSysReg(OperandVector &Operands);
264 ParseStatus tryParseSysCROperand(OperandVector &Operands);
265 template <bool IsSVEPrefetch = false>
266 ParseStatus tryParsePrefetch(OperandVector &Operands);
267 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
268 ParseStatus tryParsePSBHint(OperandVector &Operands);
269 ParseStatus tryParseBTIHint(OperandVector &Operands);
270 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
271 ParseStatus tryParseTIndexHint(OperandVector &Operands);
272 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
273 ParseStatus tryParseAdrLabel(OperandVector &Operands);
274 template <bool AddFPZeroAsLiteral>
275 ParseStatus tryParseFPImm(OperandVector &Operands);
276 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
277 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
278 bool tryParseNeonVectorRegister(OperandVector &Operands);
279 ParseStatus tryParseVectorIndex(OperandVector &Operands);
280 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
281 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
282 template <bool ParseShiftExtend,
283 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
284 ParseStatus tryParseGPROperand(OperandVector &Operands);
285 ParseStatus tryParseZTOperand(OperandVector &Operands);
286 template <bool ParseShiftExtend, bool ParseSuffix>
287 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
288 template <RegKind RK>
289 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
290 ParseStatus
291 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
292 template <RegKind VectorKind>
293 ParseStatus tryParseVectorList(OperandVector &Operands,
294 bool ExpectMatch = false);
295 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
296 ParseStatus tryParseSVEPattern(OperandVector &Operands);
297 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
298 ParseStatus tryParseGPR64x8(OperandVector &Operands);
299 ParseStatus tryParseImmRange(OperandVector &Operands);
300 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
301 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
302
303public:
304 enum AArch64MatchResultTy {
305 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
306#define GET_OPERAND_DIAGNOSTIC_TYPES
307#include "AArch64GenAsmMatcher.inc"
308 };
309 bool IsILP32;
310 bool IsWindowsArm64EC;
311
312 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
313 const MCInstrInfo &MII, const MCTargetOptions &Options)
314 : MCTargetAsmParser(Options, STI, MII) {
315 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
316 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
318 MCStreamer &S = getParser().getStreamer();
319 if (S.getTargetStreamer() == nullptr)
320 new AArch64TargetStreamer(S);
321
322 // Alias .hword/.word/.[dx]word to the target-independent
323 // .2byte/.4byte/.8byte directives as they have the same form and
324 // semantics:
325 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
326 Parser.addAliasForDirective(".hword", ".2byte");
327 Parser.addAliasForDirective(".word", ".4byte");
328 Parser.addAliasForDirective(".dword", ".8byte");
329 Parser.addAliasForDirective(".xword", ".8byte");
330
331 // Initialize the set of available features.
332 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
333 }
334
335 bool areEqualRegs(const MCParsedAsmOperand &Op1,
336 const MCParsedAsmOperand &Op2) const override;
337 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
338 SMLoc NameLoc, OperandVector &Operands) override;
339 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
340 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
341 SMLoc &EndLoc) override;
342 bool ParseDirective(AsmToken DirectiveID) override;
343 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
344 unsigned Kind) override;
345
346 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
347 AArch64::Specifier &DarwinSpec,
348 int64_t &Addend);
349};
350
351/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
352/// instruction.
353class AArch64Operand : public MCParsedAsmOperand {
354private:
355 enum KindTy {
356 k_Immediate,
357 k_ShiftedImm,
358 k_ImmRange,
359 k_CondCode,
360 k_Register,
361 k_MatrixRegister,
362 k_MatrixTileList,
363 k_SVCR,
364 k_VectorList,
365 k_VectorIndex,
366 k_Token,
367 k_SysReg,
368 k_SysCR,
369 k_Prefetch,
370 k_ShiftExtend,
371 k_FPImm,
372 k_Barrier,
373 k_PSBHint,
374 k_PHint,
375 k_BTIHint,
376 k_CMHPriorityHint,
377 k_TIndexHint,
378 } Kind;
379
380 SMLoc StartLoc, EndLoc;
381
382 struct TokOp {
383 const char *Data;
384 unsigned Length;
385 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
386 };
387
388 // Separate shift/extend operand.
389 struct ShiftExtendOp {
390 AArch64_AM::ShiftExtendType Type;
391 unsigned Amount;
392 bool HasExplicitAmount;
393 };
394
395 struct RegOp {
396 MCRegister Reg;
397 RegKind Kind;
398 int ElementWidth;
399
400 // The register may be allowed as a different register class,
401 // e.g. for GPR64as32 or GPR32as64.
402 RegConstraintEqualityTy EqualityTy;
403
404 // In some cases the shift/extend needs to be explicitly parsed together
405 // with the register, rather than as a separate operand. This is needed
406 // for addressing modes where the instruction as a whole dictates the
407 // scaling/extend, rather than specific bits in the instruction.
408 // By parsing them as a single operand, we avoid the need to pass an
409 // extra operand in all CodeGen patterns (because all operands need to
410 // have an associated value), and we avoid the need to update TableGen to
411 // accept operands that have no associated bits in the instruction.
412 //
413 // An added benefit of parsing them together is that the assembler
414 // can give a sensible diagnostic if the scaling is not correct.
415 //
416 // The default is 'lsl #0' (HasExplicitAmount = false) if no
417 // ShiftExtend is specified.
418 ShiftExtendOp ShiftExtend;
419 };
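 // Illustrative example of the combined register+ShiftExtend operand above:
 // in "ldr x0, [x1, w2, sxtw #3]" the index "w2, sxtw #3" is parsed as a
 // single register operand whose ShiftExtend is {SXTW, Amount = 3}, letting
 // the matcher give a precise diagnostic if the scale is wrong for the access
 // size (e.g. "sxtw #2" for an 8-byte load).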
420
421 struct MatrixRegOp {
422 MCRegister Reg;
423 unsigned ElementWidth;
424 MatrixKind Kind;
425 };
426
427 struct MatrixTileListOp {
428 unsigned RegMask = 0;
429 };
430
431 struct VectorListOp {
432 MCRegister Reg;
433 unsigned Count;
434 unsigned Stride;
435 unsigned NumElements;
436 unsigned ElementWidth;
437 RegKind RegisterKind;
438 };
439
440 struct VectorIndexOp {
441 int Val;
442 };
443
444 struct ImmOp {
445 const MCExpr *Val;
446 };
447
448 struct ShiftedImmOp {
449 const MCExpr *Val;
450 unsigned ShiftAmount;
451 };
452
453 struct ImmRangeOp {
454 unsigned First;
455 unsigned Last;
456 };
457
458 struct CondCodeOp {
459 AArch64CC::CondCode Code;
460 };
461
462 struct FPImmOp {
463 uint64_t Val; // APFloat value bitcasted to uint64_t.
464 bool IsExact; // describes whether parsed value was exact.
465 };
466
467 struct BarrierOp {
468 const char *Data;
469 unsigned Length;
470 unsigned Val; // Not the enum since not all values have names.
471 bool HasnXSModifier;
472 };
473
474 struct SysRegOp {
475 const char *Data;
476 unsigned Length;
477 uint32_t MRSReg;
478 uint32_t MSRReg;
479 uint32_t PStateField;
480 };
481
482 struct SysCRImmOp {
483 unsigned Val;
484 };
485
486 struct PrefetchOp {
487 const char *Data;
488 unsigned Length;
489 unsigned Val;
490 };
491
492 struct PSBHintOp {
493 const char *Data;
494 unsigned Length;
495 unsigned Val;
496 };
497 struct PHintOp {
498 const char *Data;
499 unsigned Length;
500 unsigned Val;
501 };
502 struct BTIHintOp {
503 const char *Data;
504 unsigned Length;
505 unsigned Val;
506 };
507 struct CMHPriorityHintOp {
508 const char *Data;
509 unsigned Length;
510 unsigned Val;
511 };
512 struct TIndexHintOp {
513 const char *Data;
514 unsigned Length;
515 unsigned Val;
516 };
517
518 struct SVCROp {
519 const char *Data;
520 unsigned Length;
521 unsigned PStateField;
522 };
523
524 union {
525 struct TokOp Tok;
526 struct RegOp Reg;
527 struct MatrixRegOp MatrixReg;
528 struct MatrixTileListOp MatrixTileList;
529 struct VectorListOp VectorList;
530 struct VectorIndexOp VectorIndex;
531 struct ImmOp Imm;
532 struct ShiftedImmOp ShiftedImm;
533 struct ImmRangeOp ImmRange;
534 struct CondCodeOp CondCode;
535 struct FPImmOp FPImm;
536 struct BarrierOp Barrier;
537 struct SysRegOp SysReg;
538 struct SysCRImmOp SysCRImm;
539 struct PrefetchOp Prefetch;
540 struct PSBHintOp PSBHint;
541 struct PHintOp PHint;
542 struct BTIHintOp BTIHint;
543 struct CMHPriorityHintOp CMHPriorityHint;
544 struct TIndexHintOp TIndexHint;
545 struct ShiftExtendOp ShiftExtend;
546 struct SVCROp SVCR;
547 };
548
549 // Keep the MCContext around as the MCExprs may need to be manipulated during
550 // the add<>Operands() calls.
551 MCContext &Ctx;
552
553public:
554 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
555
556 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
557 Kind = o.Kind;
558 StartLoc = o.StartLoc;
559 EndLoc = o.EndLoc;
560 switch (Kind) {
561 case k_Token:
562 Tok = o.Tok;
563 break;
564 case k_Immediate:
565 Imm = o.Imm;
566 break;
567 case k_ShiftedImm:
568 ShiftedImm = o.ShiftedImm;
569 break;
570 case k_ImmRange:
571 ImmRange = o.ImmRange;
572 break;
573 case k_CondCode:
574 CondCode = o.CondCode;
575 break;
576 case k_FPImm:
577 FPImm = o.FPImm;
578 break;
579 case k_Barrier:
580 Barrier = o.Barrier;
581 break;
582 case k_Register:
583 Reg = o.Reg;
584 break;
585 case k_MatrixRegister:
586 MatrixReg = o.MatrixReg;
587 break;
588 case k_MatrixTileList:
589 MatrixTileList = o.MatrixTileList;
590 break;
591 case k_VectorList:
592 VectorList = o.VectorList;
593 break;
594 case k_VectorIndex:
595 VectorIndex = o.VectorIndex;
596 break;
597 case k_SysReg:
598 SysReg = o.SysReg;
599 break;
600 case k_SysCR:
601 SysCRImm = o.SysCRImm;
602 break;
603 case k_Prefetch:
604 Prefetch = o.Prefetch;
605 break;
606 case k_PSBHint:
607 PSBHint = o.PSBHint;
608 break;
609 case k_PHint:
610 PHint = o.PHint;
611 break;
612 case k_BTIHint:
613 BTIHint = o.BTIHint;
614 break;
615 case k_CMHPriorityHint:
616 CMHPriorityHint = o.CMHPriorityHint;
617 break;
618 case k_TIndexHint:
619 TIndexHint = o.TIndexHint;
620 break;
621 case k_ShiftExtend:
622 ShiftExtend = o.ShiftExtend;
623 break;
624 case k_SVCR:
625 SVCR = o.SVCR;
626 break;
627 }
628 }
629
630 /// getStartLoc - Get the location of the first token of this operand.
631 SMLoc getStartLoc() const override { return StartLoc; }
632 /// getEndLoc - Get the location of the last token of this operand.
633 SMLoc getEndLoc() const override { return EndLoc; }
634
635 StringRef getToken() const {
636 assert(Kind == k_Token && "Invalid access!");
637 return StringRef(Tok.Data, Tok.Length);
638 }
639
640 bool isTokenSuffix() const {
641 assert(Kind == k_Token && "Invalid access!");
642 return Tok.IsSuffix;
643 }
644
645 const MCExpr *getImm() const {
646 assert(Kind == k_Immediate && "Invalid access!");
647 return Imm.Val;
648 }
649
650 const MCExpr *getShiftedImmVal() const {
651 assert(Kind == k_ShiftedImm && "Invalid access!");
652 return ShiftedImm.Val;
653 }
654
655 unsigned getShiftedImmShift() const {
656 assert(Kind == k_ShiftedImm && "Invalid access!");
657 return ShiftedImm.ShiftAmount;
658 }
659
660 unsigned getFirstImmVal() const {
661 assert(Kind == k_ImmRange && "Invalid access!");
662 return ImmRange.First;
663 }
664
665 unsigned getLastImmVal() const {
666 assert(Kind == k_ImmRange && "Invalid access!");
667 return ImmRange.Last;
668 }
669
670 AArch64CC::CondCode getCondCode() const {
671 assert(Kind == k_CondCode && "Invalid access!");
672 return CondCode.Code;
673 }
674
675 APFloat getFPImm() const {
676 assert (Kind == k_FPImm && "Invalid access!");
677 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
678 }
679
680 bool getFPImmIsExact() const {
681 assert (Kind == k_FPImm && "Invalid access!");
682 return FPImm.IsExact;
683 }
684
685 unsigned getBarrier() const {
686 assert(Kind == k_Barrier && "Invalid access!");
687 return Barrier.Val;
688 }
689
690 StringRef getBarrierName() const {
691 assert(Kind == k_Barrier && "Invalid access!");
692 return StringRef(Barrier.Data, Barrier.Length);
693 }
694
695 bool getBarriernXSModifier() const {
696 assert(Kind == k_Barrier && "Invalid access!");
697 return Barrier.HasnXSModifier;
698 }
699
700 MCRegister getReg() const override {
701 assert(Kind == k_Register && "Invalid access!");
702 return Reg.Reg;
703 }
704
705 MCRegister getMatrixReg() const {
706 assert(Kind == k_MatrixRegister && "Invalid access!");
707 return MatrixReg.Reg;
708 }
709
710 unsigned getMatrixElementWidth() const {
711 assert(Kind == k_MatrixRegister && "Invalid access!");
712 return MatrixReg.ElementWidth;
713 }
714
715 MatrixKind getMatrixKind() const {
716 assert(Kind == k_MatrixRegister && "Invalid access!");
717 return MatrixReg.Kind;
718 }
719
720 unsigned getMatrixTileListRegMask() const {
721 assert(isMatrixTileList() && "Invalid access!");
722 return MatrixTileList.RegMask;
723 }
724
725 RegConstraintEqualityTy getRegEqualityTy() const {
726 assert(Kind == k_Register && "Invalid access!");
727 return Reg.EqualityTy;
728 }
729
730 MCRegister getVectorListStart() const {
731 assert(Kind == k_VectorList && "Invalid access!");
732 return VectorList.Reg;
733 }
734
735 unsigned getVectorListCount() const {
736 assert(Kind == k_VectorList && "Invalid access!");
737 return VectorList.Count;
738 }
739
740 unsigned getVectorListStride() const {
741 assert(Kind == k_VectorList && "Invalid access!");
742 return VectorList.Stride;
743 }
744
745 int getVectorIndex() const {
746 assert(Kind == k_VectorIndex && "Invalid access!");
747 return VectorIndex.Val;
748 }
749
750 StringRef getSysReg() const {
751 assert(Kind == k_SysReg && "Invalid access!");
752 return StringRef(SysReg.Data, SysReg.Length);
753 }
754
755 unsigned getSysCR() const {
756 assert(Kind == k_SysCR && "Invalid access!");
757 return SysCRImm.Val;
758 }
759
760 unsigned getPrefetch() const {
761 assert(Kind == k_Prefetch && "Invalid access!");
762 return Prefetch.Val;
763 }
764
765 unsigned getPSBHint() const {
766 assert(Kind == k_PSBHint && "Invalid access!");
767 return PSBHint.Val;
768 }
769
770 unsigned getPHint() const {
771 assert(Kind == k_PHint && "Invalid access!");
772 return PHint.Val;
773 }
774
775 StringRef getPSBHintName() const {
776 assert(Kind == k_PSBHint && "Invalid access!");
777 return StringRef(PSBHint.Data, PSBHint.Length);
778 }
779
780 StringRef getPHintName() const {
781 assert(Kind == k_PHint && "Invalid access!");
782 return StringRef(PHint.Data, PHint.Length);
783 }
784
785 unsigned getBTIHint() const {
786 assert(Kind == k_BTIHint && "Invalid access!");
787 return BTIHint.Val;
788 }
789
790 StringRef getBTIHintName() const {
791 assert(Kind == k_BTIHint && "Invalid access!");
792 return StringRef(BTIHint.Data, BTIHint.Length);
793 }
794
795 unsigned getCMHPriorityHint() const {
796 assert(Kind == k_CMHPriorityHint && "Invalid access!");
797 return CMHPriorityHint.Val;
798 }
799
800 StringRef getCMHPriorityHintName() const {
801 assert(Kind == k_CMHPriorityHint && "Invalid access!");
802 return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
803 }
804
805 unsigned getTIndexHint() const {
806 assert(Kind == k_TIndexHint && "Invalid access!");
807 return TIndexHint.Val;
808 }
809
810 StringRef getTIndexHintName() const {
811 assert(Kind == k_TIndexHint && "Invalid access!");
812 return StringRef(TIndexHint.Data, TIndexHint.Length);
813 }
814
815 StringRef getSVCR() const {
816 assert(Kind == k_SVCR && "Invalid access!");
817 return StringRef(SVCR.Data, SVCR.Length);
818 }
819
820 StringRef getPrefetchName() const {
821 assert(Kind == k_Prefetch && "Invalid access!");
822 return StringRef(Prefetch.Data, Prefetch.Length);
823 }
824
825 AArch64_AM::ShiftExtendType getShiftExtendType() const {
826 if (Kind == k_ShiftExtend)
827 return ShiftExtend.Type;
828 if (Kind == k_Register)
829 return Reg.ShiftExtend.Type;
830 llvm_unreachable("Invalid access!");
831 }
832
833 unsigned getShiftExtendAmount() const {
834 if (Kind == k_ShiftExtend)
835 return ShiftExtend.Amount;
836 if (Kind == k_Register)
837 return Reg.ShiftExtend.Amount;
838 llvm_unreachable("Invalid access!");
839 }
840
841 bool hasShiftExtendAmount() const {
842 if (Kind == k_ShiftExtend)
843 return ShiftExtend.HasExplicitAmount;
844 if (Kind == k_Register)
845 return Reg.ShiftExtend.HasExplicitAmount;
846 llvm_unreachable("Invalid access!");
847 }
848
849 bool isImm() const override { return Kind == k_Immediate; }
850 bool isMem() const override { return false; }
851
852 bool isUImm6() const {
853 if (!isImm())
854 return false;
855 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
856 if (!MCE)
857 return false;
858 int64_t Val = MCE->getValue();
859 return (Val >= 0 && Val < 64);
860 }
861
862 template <int Width> bool isSImm() const {
863 return bool(isSImmScaled<Width, 1>());
864 }
865
866 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
867 return isImmScaled<Bits, Scale>(true);
868 }
869
870 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
871 DiagnosticPredicate isUImmScaled() const {
872 if (IsRange && isImmRange() &&
873 (getLastImmVal() != getFirstImmVal() + Offset))
875
876 return isImmScaled<Bits, Scale, IsRange>(false);
877 }
878
879 template <int Bits, int Scale, bool IsRange = false>
880 DiagnosticPredicate isImmScaled(bool Signed) const {
881 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
882 (isImmRange() && !IsRange))
884
885 int64_t Val;
886 if (isImmRange())
887 Val = getFirstImmVal();
888 else {
889 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
890 if (!MCE)
892 Val = MCE->getValue();
893 }
894
895 int64_t MinVal, MaxVal;
896 if (Signed) {
897 int64_t Shift = Bits - 1;
898 MinVal = (int64_t(1) << Shift) * -Scale;
899 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
900 } else {
901 MinVal = 0;
902 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
903 }
904
905 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
907
909 }
910
911 DiagnosticPredicate isSVEPattern() const {
912 if (!isImm())
914 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
915 if (!MCE)
917 int64_t Val = MCE->getValue();
918 if (Val >= 0 && Val < 32)
921 }
922
923 DiagnosticPredicate isSVEVecLenSpecifier() const {
924 if (!isImm())
926 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
927 if (!MCE)
929 int64_t Val = MCE->getValue();
930 if (Val >= 0 && Val <= 1)
933 }
934
935 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
936 AArch64::Specifier ELFSpec;
937 AArch64::Specifier DarwinSpec;
938 int64_t Addend;
939 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
940 Addend)) {
941 // If we don't understand the expression, assume the best and
942 // let the fixup and relocation code deal with it.
943 return true;
944 }
945
946 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
954 ELFSpec)) {
955 // Note that we don't range-check the addend. It's adjusted modulo page
956 // size when converted, so there is no "out of range" condition when using
957 // @pageoff.
958 return true;
959 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
960 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
961 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
962 return Addend == 0;
963 }
964
965 return false;
966 }
967
968 template <int Scale> bool isUImm12Offset() const {
969 if (!isImm())
970 return false;
971
972 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
973 if (!MCE)
974 return isSymbolicUImm12Offset(getImm());
975
976 int64_t Val = MCE->getValue();
977 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
978 }
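 // Illustrative example: for an 8-byte access (Scale = 8), "ldr x0, [x1, #16]"
 // passes isUImm12Offset<8>() because 16 is a non-negative multiple of 8 and
 // 16 / 8 = 2 fits in the 12-bit scaled field (0..4095).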
979
980 template <int N, int M>
981 bool isImmInRange() const {
982 if (!isImm())
983 return false;
984 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
985 if (!MCE)
986 return false;
987 int64_t Val = MCE->getValue();
988 return (Val >= N && Val <= M);
989 }
990
991 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
992 // a logical immediate can always be represented when inverted.
993 template <typename T>
994 bool isLogicalImm() const {
995 if (!isImm())
996 return false;
997 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
998 if (!MCE)
999 return false;
1000
1001 int64_t Val = MCE->getValue();
1002 // Avoid left shift by 64 directly.
1003 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
1004 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
1005 if ((Val & Upper) && (Val & Upper) != Upper)
1006 return false;
1007
1008 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
1009 }
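 // Worked example (illustrative): for T = int32_t, Upper covers the top 32
 // bits (built with two 16-bit shifts to avoid a shift by 64). A sign-extended
 // value such as 0xFFFFFFFFFF00FF00 has all-ones in Upper, so it is accepted
 // and the low half 0xFF00FF00 is tested with isLogicalImmediate(..., 32),
 // which succeeds (0xFF00 repeated per 16-bit element).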
1010
1011 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
1012
1013 bool isImmRange() const { return Kind == k_ImmRange; }
1014
1015 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
1016 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
1017 /// immediate that can be shifted by 'Shift'.
1018 template <unsigned Width>
1019 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
1020 if (isShiftedImm() && Width == getShiftedImmShift())
1021 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
1022 return std::make_pair(CE->getValue(), Width);
1023
1024 if (isImm())
1025 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
1026 int64_t Val = CE->getValue();
1027 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
1028 return std::make_pair(Val >> Width, Width);
1029 else
1030 return std::make_pair(Val, 0u);
1031 }
1032
1033 return {};
1034 }
1035
1036 bool isAddSubImm() const {
1037 if (!isShiftedImm() && !isImm())
1038 return false;
1039
1040 const MCExpr *Expr;
1041
1042 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
1043 if (isShiftedImm()) {
1044 unsigned Shift = ShiftedImm.ShiftAmount;
1045 Expr = ShiftedImm.Val;
1046 if (Shift != 0 && Shift != 12)
1047 return false;
1048 } else {
1049 Expr = getImm();
1050 }
1051
1052 AArch64::Specifier ELFSpec;
1053 AArch64::Specifier DarwinSpec;
1054 int64_t Addend;
1055 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1056 Addend)) {
1057 return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
1058 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
1059 (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
1067 ELFSpec);
1068 }
1069
1070 // If it's a constant, it should be a real immediate in range.
1071 if (auto ShiftedVal = getShiftedVal<12>())
1072 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1073
1074 // If it's an expression, we hope for the best and let the fixup/relocation
1075 // code deal with it.
1076 return true;
1077 }
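 // Illustrative example: "add x0, x1, #0x5000" is accepted because
 // getShiftedVal<12>() folds 0x5000 to (5, shift 12); plain immediates in
 // 0..0xfff use 'lsl #0', and multiples of 0x1000 up to 0xfff000 use
 // 'lsl #12'.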
1078
1079 bool isAddSubImmNeg() const {
1080 if (!isShiftedImm() && !isImm())
1081 return false;
1082
1083 // Otherwise it should be a real negative immediate in range.
1084 if (auto ShiftedVal = getShiftedVal<12>())
1085 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1086
1087 return false;
1088 }
1089
1090 // Signed value in the range -128 to +127. For element widths of
1091 // 16 bits or higher it may also be a signed multiple of 256 in the
1092 // range -32768 to +32512.
1093 // For an element width of 8 bits a range of -128 to 255 is accepted,
1094 // since a byte copy can be interpreted as either signed or unsigned.
1095 template <typename T>
1096 DiagnosticPredicate isSVECpyImm() const {
1097 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1099
1100 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1101 std::is_same<int8_t, T>::value;
1102 if (auto ShiftedImm = getShiftedVal<8>())
1103 if (!(IsByte && ShiftedImm->second) &&
1104 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1105 << ShiftedImm->second))
1107
1109 }
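 // Illustrative example (assuming the usual MOV/CPY aliases): with byte
 // elements both "mov z0.b, p0/m, #-128" and "mov z0.b, p0/m, #255" are
 // accepted, since they name the same bit pattern; for wider elements the
 // 'lsl #8' form covers the signed multiples of 256.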
1110
1111 // Unsigned value in the range 0 to 255. For element widths of
1112 // 16 bits or higher it may also be a signed multiple of 256 in the
1113 // range 0 to 65280.
1114 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1115 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1117
1118 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1119 std::is_same<int8_t, T>::value;
1120 if (auto ShiftedImm = getShiftedVal<8>())
1121 if (!(IsByte && ShiftedImm->second) &&
1122 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1123 << ShiftedImm->second))
1125
1127 }
1128
1129 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1130 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1133 }
1134
1135 bool isCondCode() const { return Kind == k_CondCode; }
1136
1137 bool isSIMDImmType10() const {
1138 if (!isImm())
1139 return false;
1140 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1141 if (!MCE)
1142 return false;
1143 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1144 }
1145
1146 template<int N>
1147 bool isBranchTarget() const {
1148 if (!isImm())
1149 return false;
1150 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1151 if (!MCE)
1152 return true;
1153 int64_t Val = MCE->getValue();
1154 if (Val & 0x3)
1155 return false;
1156 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1157 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1158 }
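 // Illustrative example: a 26-bit target (as used by B/BL) accepts
 // word-aligned offsets in [-(1 << 27), (1 << 27) - 4] (about +/-128 MiB),
 // while a 19-bit target (as used by conditional branches) covers about
 // +/-1 MiB.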
1159
1160 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1161 if (!isImm())
1162 return false;
1163
1164 AArch64::Specifier ELFSpec;
1165 AArch64::Specifier DarwinSpec;
1166 int64_t Addend;
1167 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1168 Addend)) {
1169 return false;
1170 }
1171 if (DarwinSpec != AArch64::S_None)
1172 return false;
1173
1174 return llvm::is_contained(AllowedModifiers, ELFSpec);
1175 }
1176
1177 bool isMovWSymbolG3() const {
1178 return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1179 }
1180
1181 bool isMovWSymbolG2() const {
1182 return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1186 }
1187
1188 bool isMovWSymbolG1() const {
1189 return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1194 }
1195
1196 bool isMovWSymbolG0() const {
1197 return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1202 }
1203
1204 template<int RegWidth, int Shift>
1205 bool isMOVZMovAlias() const {
1206 if (!isImm()) return false;
1207
1208 const MCExpr *E = getImm();
1209 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1210 uint64_t Value = CE->getValue();
1211
1212 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1213 }
1214 // Only the Shift == 0 case is supported when an expression, rather than a
1215 // constant, is used as the operand.
1216 return !Shift && E;
1217 }
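 // Illustrative example: "mov x0, #0x12340000" can match the MOVZ alias with
 // RegWidth = 64 and Shift = 16, because 0x12340000 is a 16-bit value shifted
 // left by 16; an unresolved expression is only accepted when Shift is 0.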
1218
1219 template<int RegWidth, int Shift>
1220 bool isMOVNMovAlias() const {
1221 if (!isImm()) return false;
1222
1223 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1224 if (!CE) return false;
1225 uint64_t Value = CE->getValue();
1226
1227 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1228 }
1229
1230 bool isFPImm() const {
1231 return Kind == k_FPImm &&
1232 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1233 }
1234
1235 bool isBarrier() const {
1236 return Kind == k_Barrier && !getBarriernXSModifier();
1237 }
1238 bool isBarriernXS() const {
1239 return Kind == k_Barrier && getBarriernXSModifier();
1240 }
1241 bool isSysReg() const { return Kind == k_SysReg; }
1242
1243 bool isMRSSystemRegister() const {
1244 if (!isSysReg()) return false;
1245
1246 return SysReg.MRSReg != -1U;
1247 }
1248
1249 bool isMSRSystemRegister() const {
1250 if (!isSysReg()) return false;
1251 return SysReg.MSRReg != -1U;
1252 }
1253
1254 bool isSystemPStateFieldWithImm0_1() const {
1255 if (!isSysReg()) return false;
1256 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1257 }
1258
1259 bool isSystemPStateFieldWithImm0_15() const {
1260 if (!isSysReg())
1261 return false;
1262 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1263 }
1264
1265 bool isSVCR() const {
1266 if (Kind != k_SVCR)
1267 return false;
1268 return SVCR.PStateField != -1U;
1269 }
1270
1271 bool isReg() const override {
1272 return Kind == k_Register;
1273 }
1274
1275 bool isVectorList() const { return Kind == k_VectorList; }
1276
1277 bool isScalarReg() const {
1278 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1279 }
1280
1281 bool isNeonVectorReg() const {
1282 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1283 }
1284
1285 bool isNeonVectorRegLo() const {
1286 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1287 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1288 Reg.Reg) ||
1289 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1290 Reg.Reg));
1291 }
1292
1293 bool isNeonVectorReg0to7() const {
1294 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1295 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1296 Reg.Reg));
1297 }
1298
1299 bool isMatrix() const { return Kind == k_MatrixRegister; }
1300 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1301
1302 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1303 RegKind RK;
1304 switch (Class) {
1305 case AArch64::PPRRegClassID:
1306 case AArch64::PPR_3bRegClassID:
1307 case AArch64::PPR_p8to15RegClassID:
1308 case AArch64::PNRRegClassID:
1309 case AArch64::PNR_p8to15RegClassID:
1310 case AArch64::PPRorPNRRegClassID:
1311 RK = RegKind::SVEPredicateAsCounter;
1312 break;
1313 default:
1314 llvm_unreachable("Unsupported register class");
1315 }
1316
1317 return (Kind == k_Register && Reg.Kind == RK) &&
1318 AArch64MCRegisterClasses[Class].contains(getReg());
1319 }
1320
1321 template <unsigned Class> bool isSVEVectorReg() const {
1322 RegKind RK;
1323 switch (Class) {
1324 case AArch64::ZPRRegClassID:
1325 case AArch64::ZPR_3bRegClassID:
1326 case AArch64::ZPR_4bRegClassID:
1327 case AArch64::ZPRMul2_LoRegClassID:
1328 case AArch64::ZPRMul2_HiRegClassID:
1329 case AArch64::ZPR_KRegClassID:
1330 RK = RegKind::SVEDataVector;
1331 break;
1332 case AArch64::PPRRegClassID:
1333 case AArch64::PPR_3bRegClassID:
1334 case AArch64::PPR_p8to15RegClassID:
1335 case AArch64::PNRRegClassID:
1336 case AArch64::PNR_p8to15RegClassID:
1337 case AArch64::PPRorPNRRegClassID:
1338 RK = RegKind::SVEPredicateVector;
1339 break;
1340 default:
1341 llvm_unreachable("Unsupported register class");
1342 }
1343
1344 return (Kind == k_Register && Reg.Kind == RK) &&
1345 AArch64MCRegisterClasses[Class].contains(getReg());
1346 }
1347
1348 template <unsigned Class> bool isFPRasZPR() const {
1349 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1350 AArch64MCRegisterClasses[Class].contains(getReg());
1351 }
1352
1353 template <int ElementWidth, unsigned Class>
1354 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1355 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1357
1358 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1360
1362 }
1363
1364 template <int ElementWidth, unsigned Class>
1365 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1366 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1367 Reg.Kind != RegKind::SVEPredicateVector))
1369
1370 if ((isSVEPredicateAsCounterReg<Class>() ||
1371 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1372 Reg.ElementWidth == ElementWidth)
1374
1376 }
1377
1378 template <int ElementWidth, unsigned Class>
1379 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1380 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1382
1383 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1385
1387 }
1388
1389 template <int ElementWidth, unsigned Class>
1390 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1391 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1393
1394 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1396
1398 }
1399
1400 template <int ElementWidth, unsigned Class,
1401 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1402 bool ShiftWidthAlwaysSame>
1403 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1404 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1405 if (!VectorMatch.isMatch())
1407
1408 // Give a more specific diagnostic when the user has explicitly typed in
1409 // a shift-amount that does not match what is expected, but for which
1410 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1411 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1412 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1413 ShiftExtendTy == AArch64_AM::SXTW) &&
1414 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1416
1417 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1419
1421 }
1422
1423 bool isGPR32as64() const {
1424 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1425 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
1426 }
1427
1428 bool isGPR64as32() const {
1429 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1430 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
1431 }
1432
1433 bool isGPR64x8() const {
1434 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1435 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1436 Reg.Reg);
1437 }
1438
1439 bool isWSeqPair() const {
1440 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1441 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1442 Reg.Reg);
1443 }
1444
1445 bool isXSeqPair() const {
1446 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1447 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1448 Reg.Reg);
1449 }
1450
1451 bool isSyspXzrPair() const {
1452 return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
1453 }
1454
1455 template<int64_t Angle, int64_t Remainder>
1456 DiagnosticPredicate isComplexRotation() const {
1457 if (!isImm())
1459
1460 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1461 if (!CE)
1463 uint64_t Value = CE->getValue();
1464
1465 if (Value % Angle == Remainder && Value <= 270)
1468 }
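 // Illustrative example: FCMLA-style rotations use isComplexRotation<90, 0>
 // (accepting #0, #90, #180, #270), while FCADD-style rotations use
 // isComplexRotation<180, 90> (accepting #90 and #270).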
1469
1470 template <unsigned RegClassID> bool isGPR64() const {
1471 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1472 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1473 }
1474
1475 template <unsigned RegClassID, int ExtWidth>
1476 DiagnosticPredicate isGPR64WithShiftExtend() const {
1477 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1479
1480 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1481 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1484 }
1485
1486 /// Is this a vector list with the type implicit (presumably attached to the
1487 /// instruction itself)?
1488 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1489 bool isImplicitlyTypedVectorList() const {
1490 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1491 VectorList.NumElements == 0 &&
1492 VectorList.RegisterKind == VectorKind &&
1493 (!IsConsecutive || (VectorList.Stride == 1));
1494 }
1495
1496 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1497 unsigned ElementWidth, unsigned Stride = 1>
1498 bool isTypedVectorList() const {
1499 if (Kind != k_VectorList)
1500 return false;
1501 if (VectorList.Count != NumRegs)
1502 return false;
1503 if (VectorList.RegisterKind != VectorKind)
1504 return false;
1505 if (VectorList.ElementWidth != ElementWidth)
1506 return false;
1507 if (VectorList.Stride != Stride)
1508 return false;
1509 return VectorList.NumElements == NumElements;
1510 }
1511
1512 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1513 unsigned ElementWidth, unsigned RegClass>
1514 DiagnosticPredicate isTypedVectorListMultiple() const {
1515 bool Res =
1516 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1517 if (!Res)
1519 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
1522 }
1523
1524 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1525 unsigned ElementWidth>
1526 DiagnosticPredicate isTypedVectorListStrided() const {
1527 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1528 ElementWidth, Stride>();
1529 if (!Res)
1531 if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
1532 ((VectorList.Reg >= AArch64::Z16) &&
1533 (VectorList.Reg < (AArch64::Z16 + Stride))))
1536 }
1537
1538 template <int Min, int Max>
1539 DiagnosticPredicate isVectorIndex() const {
1540 if (Kind != k_VectorIndex)
1542 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1545 }
1546
1547 bool isToken() const override { return Kind == k_Token; }
1548
1549 bool isTokenEqual(StringRef Str) const {
1550 return Kind == k_Token && getToken() == Str;
1551 }
1552 bool isSysCR() const { return Kind == k_SysCR; }
1553 bool isPrefetch() const { return Kind == k_Prefetch; }
1554 bool isPSBHint() const { return Kind == k_PSBHint; }
1555 bool isPHint() const { return Kind == k_PHint; }
1556 bool isBTIHint() const { return Kind == k_BTIHint; }
1557 bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
1558 bool isTIndexHint() const { return Kind == k_TIndexHint; }
1559 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1560 bool isShifter() const {
1561 if (!isShiftExtend())
1562 return false;
1563
1564 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1565 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1566 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1567 ST == AArch64_AM::MSL);
1568 }
1569
1570 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1571 if (Kind != k_FPImm)
1573
1574 if (getFPImmIsExact()) {
1575 // Lookup the immediate from table of supported immediates.
1576 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1577 assert(Desc && "Unknown enum value");
1578
1579 // Calculate its FP value.
1580 APFloat RealVal(APFloat::IEEEdouble());
1581 auto StatusOrErr =
1582 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1583 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1584 llvm_unreachable("FP immediate is not exact");
1585
1586 if (getFPImm().bitwiseIsEqual(RealVal))
1588 }
1589
1591 }
1592
1593 template <unsigned ImmA, unsigned ImmB>
1594 DiagnosticPredicate isExactFPImm() const {
1595 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1596 if ((Res = isExactFPImm<ImmA>()))
1598 if ((Res = isExactFPImm<ImmB>()))
1600 return Res;
1601 }
1602
1603 bool isExtend() const {
1604 if (!isShiftExtend())
1605 return false;
1606
1607 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1608 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1609 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1610 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1611 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1612 ET == AArch64_AM::LSL) &&
1613 getShiftExtendAmount() <= 4;
1614 }
1615
1616 bool isExtend64() const {
1617 if (!isExtend())
1618 return false;
1619 // Make sure the extend expects a 32-bit source register.
1620 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1621 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1622 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1623 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1624 }
1625
1626 bool isExtendLSL64() const {
1627 if (!isExtend())
1628 return false;
1629 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1630 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1631 ET == AArch64_AM::LSL) &&
1632 getShiftExtendAmount() <= 4;
1633 }
1634
1635 bool isLSLImm3Shift() const {
1636 if (!isShiftExtend())
1637 return false;
1638 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1639 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1640 }
1641
1642 template<int Width> bool isMemXExtend() const {
1643 if (!isExtend())
1644 return false;
1645 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1646 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1647 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1648 getShiftExtendAmount() == 0);
1649 }
1650
1651 template<int Width> bool isMemWExtend() const {
1652 if (!isExtend())
1653 return false;
1654 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1655 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1656 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1657 getShiftExtendAmount() == 0);
1658 }
1659
1660 template <unsigned width>
1661 bool isArithmeticShifter() const {
1662 if (!isShifter())
1663 return false;
1664
1665 // An arithmetic shifter is LSL, LSR, or ASR.
1666 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1667 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1668 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1669 }
1670
1671 template <unsigned width>
1672 bool isLogicalShifter() const {
1673 if (!isShifter())
1674 return false;
1675
1676 // A logical shifter is LSL, LSR, ASR or ROR.
1677 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1678 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1679 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1680 getShiftExtendAmount() < width;
1681 }
1682
1683 bool isMovImm32Shifter() const {
1684 if (!isShifter())
1685 return false;
1686
1687 // A 32-bit MOVi shifter is LSL of 0 or 16.
1688 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1689 if (ST != AArch64_AM::LSL)
1690 return false;
1691 uint64_t Val = getShiftExtendAmount();
1692 return (Val == 0 || Val == 16);
1693 }
1694
1695 bool isMovImm64Shifter() const {
1696 if (!isShifter())
1697 return false;
1698
1699 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1700 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1701 if (ST != AArch64_AM::LSL)
1702 return false;
1703 uint64_t Val = getShiftExtendAmount();
1704 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1705 }
1706
1707 bool isLogicalVecShifter() const {
1708 if (!isShifter())
1709 return false;
1710
1711 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1712 unsigned Shift = getShiftExtendAmount();
1713 return getShiftExtendType() == AArch64_AM::LSL &&
1714 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1715 }
1716
1717 bool isLogicalVecHalfWordShifter() const {
1718 if (!isLogicalVecShifter())
1719 return false;
1720
1721 // A logical vector half-word shifter is a left shift by 0 or 8.
1722 unsigned Shift = getShiftExtendAmount();
1723 return getShiftExtendType() == AArch64_AM::LSL &&
1724 (Shift == 0 || Shift == 8);
1725 }
1726
1727 bool isMoveVecShifter() const {
1728 if (!isShiftExtend())
1729 return false;
1730
1731 // A move vector shifter is an MSL (shift-ones) left shift by 8 or 16.
1732 unsigned Shift = getShiftExtendAmount();
1733 return getShiftExtendType() == AArch64_AM::MSL &&
1734 (Shift == 8 || Shift == 16);
1735 }
1736
1737 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1738 // to LDUR/STUR when the offset is not legal for the former but is for
1739 // the latter. As such, in addition to checking for being a legal unscaled
1740 // address, also check that it is not a legal scaled address. This avoids
1741 // ambiguity in the matcher.
1742 template<int Width>
1743 bool isSImm9OffsetFB() const {
1744 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1745 }
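 // Illustrative example: "ldr x0, [x1, #1]" has an offset that is not a
 // multiple of 8, so it fails isUImm12Offset<8>() but passes isSImm<9>();
 // the fallback operand lets it match the LDUR form instead of LDR.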
1746
1747 bool isAdrpLabel() const {
1748 // Validation was handled during parsing, so we just verify that
1749 // something didn't go haywire.
1750 if (!isImm())
1751 return false;
1752
1753 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1754 int64_t Val = CE->getValue();
1755 int64_t Min = - (4096 * (1LL << (21 - 1)));
1756 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1757 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1758 }
1759
1760 return true;
1761 }
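 // Illustrative example: a constant ADRP operand must be a multiple of 4096
 // within roughly +/-4 GiB, so 0x10000 passes the check above while 0x1001 is
 // rejected because it is not page-aligned.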
1762
1763 bool isAdrLabel() const {
1764 // Validation was handled during parsing, so we just verify that
1765 // something didn't go haywire.
1766 if (!isImm())
1767 return false;
1768
1769 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1770 int64_t Val = CE->getValue();
1771 int64_t Min = - (1LL << (21 - 1));
1772 int64_t Max = ((1LL << (21 - 1)) - 1);
1773 return Val >= Min && Val <= Max;
1774 }
1775
1776 return true;
1777 }
1778
1779 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1780 DiagnosticPredicate isMatrixRegOperand() const {
1781 if (!isMatrix())
1783 if (getMatrixKind() != Kind ||
1784 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1785 EltSize != getMatrixElementWidth())
1788 }
1789
1790 bool isPAuthPCRelLabel16Operand() const {
1791 // PAuth PCRel16 operands are similar to regular branch targets, but only
1792 // non-positive values are allowed for concrete immediates, as the signing
1793 // instruction should be at a lower address.
1794 if (!isImm())
1795 return false;
1796 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1797 if (!MCE)
1798 return true;
1799 int64_t Val = MCE->getValue();
1800 if (Val & 0b11)
1801 return false;
1802 return (Val <= 0) && (Val > -(1 << 18));
1803 }
1804
1805 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1806 // Add as immediates when possible. Null MCExpr = 0.
1807 if (!Expr)
1808 Inst.addOperand(MCOperand::createImm(0));
1809 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1810 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1811 else
1812 Inst.addOperand(MCOperand::createExpr(Expr));
1813 }
1814
1815 void addRegOperands(MCInst &Inst, unsigned N) const {
1816 assert(N == 1 && "Invalid number of operands!");
1817 Inst.addOperand(MCOperand::createReg(getReg()));
1818 }
1819
1820 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1821 assert(N == 1 && "Invalid number of operands!");
1822 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1823 }
1824
1825 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1826 assert(N == 1 && "Invalid number of operands!");
1827 assert(
1828 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1829
1830 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1831 MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
1832 .getRegister(RI->getEncodingValue(getReg()));
1833
1834 Inst.addOperand(MCOperand::createReg(Reg));
1835 }
1836
1837 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1838 assert(N == 1 && "Invalid number of operands!");
1839 assert(
1840 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1841
1842 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1843 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
1844 .getRegister(RI->getEncodingValue(getReg()));
1845
1846 Inst.addOperand(MCOperand::createReg(Reg));
1847 }
1848
1849 template <int Width>
1850 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1851 unsigned Base;
1852 switch (Width) {
1853 case 8: Base = AArch64::B0; break;
1854 case 16: Base = AArch64::H0; break;
1855 case 32: Base = AArch64::S0; break;
1856 case 64: Base = AArch64::D0; break;
1857 case 128: Base = AArch64::Q0; break;
1858 default:
1859 llvm_unreachable("Unsupported width");
1860 }
1861 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1862 }
1863
1864 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1865 assert(N == 1 && "Invalid number of operands!");
1866 MCRegister Reg = getReg();
1867 // Normalise to PPR
1868 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1869 Reg = Reg - AArch64::PN0 + AArch64::P0;
1870 Inst.addOperand(MCOperand::createReg(Reg));
1871 }
1872
1873 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1874 assert(N == 1 && "Invalid number of operands!");
1875 Inst.addOperand(
1876 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1877 }
1878
1879 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1880 assert(N == 1 && "Invalid number of operands!");
1881 assert(
1882 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1883 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1884 }
1885
1886 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1887 assert(N == 1 && "Invalid number of operands!");
1888 assert(
1889 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1890 Inst.addOperand(MCOperand::createReg(getReg()));
1891 }
1892
1893 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1894 assert(N == 1 && "Invalid number of operands!");
1895 Inst.addOperand(MCOperand::createReg(getReg()));
1896 }
1897
1898 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1899 assert(N == 1 && "Invalid number of operands!");
1900 Inst.addOperand(MCOperand::createReg(getReg()));
1901 }
1902
1903 enum VecListIndexType {
1904 VecListIdx_DReg = 0,
1905 VecListIdx_QReg = 1,
1906 VecListIdx_ZReg = 2,
1907 VecListIdx_PReg = 3,
1908 };
1909
1910 template <VecListIndexType RegTy, unsigned NumRegs,
1911 bool IsConsecutive = false>
1912 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1913 assert(N == 1 && "Invalid number of operands!");
1914 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1915 "Expected consecutive registers");
1916 static const unsigned FirstRegs[][5] = {
1917 /* DReg */ { AArch64::Q0,
1918 AArch64::D0, AArch64::D0_D1,
1919 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1920 /* QReg */ { AArch64::Q0,
1921 AArch64::Q0, AArch64::Q0_Q1,
1922 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1923 /* ZReg */ { AArch64::Z0,
1924 AArch64::Z0, AArch64::Z0_Z1,
1925 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1926 /* PReg */ { AArch64::P0,
1927 AArch64::P0, AArch64::P0_P1 }
1928 };
1929
1930 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1931 " NumRegs must be <= 4 for ZRegs");
1932
1933 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1934 " NumRegs must be <= 2 for PRegs");
1935
1936 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1937 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1938 FirstRegs[(unsigned)RegTy][0]));
1939 }
1940
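// Strided SME register lists are encoded via tuple registers (Z0_Z8... for two
// registers, Z0_Z4_Z8_Z12... for four); the start register selects a tuple in
// either the low (Z0-based) or high (Z16-based) half, as the asserts below
// check.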
1941 template <unsigned NumRegs>
1942 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1943 assert(N == 1 && "Invalid number of operands!");
1944 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1945
1946 switch (NumRegs) {
1947 case 2:
1948 if (getVectorListStart() < AArch64::Z16) {
1949 assert((getVectorListStart() < AArch64::Z8) &&
1950 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1951 Inst.addOperand(MCOperand::createReg(
1952 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1953 } else {
1954 assert((getVectorListStart() < AArch64::Z24) &&
1955 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1956 Inst.addOperand(MCOperand::createReg(
1957 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1958 }
1959 break;
1960 case 4:
1961 if (getVectorListStart() < AArch64::Z16) {
1962 assert((getVectorListStart() < AArch64::Z4) &&
1963 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1964 Inst.addOperand(MCOperand::createReg(
1965 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1966 } else {
1967 assert((getVectorListStart() < AArch64::Z20) &&
1968 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1969 Inst.addOperand(MCOperand::createReg(
1970 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1971 }
1972 break;
1973 default:
1974 llvm_unreachable("Unsupported number of registers for strided vec list");
1975 }
1976 }
1977
1978 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1979 assert(N == 1 && "Invalid number of operands!");
1980 unsigned RegMask = getMatrixTileListRegMask();
1981 assert(RegMask <= 0xFF && "Invalid mask!");
1982 Inst.addOperand(MCOperand::createImm(RegMask));
1983 }
1984
1985 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1986 assert(N == 1 && "Invalid number of operands!");
1987 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1988 }
1989
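// An "exact" FP immediate is restricted to two candidate values; the encoded
// bit records whether the value written was ImmIs1 rather than ImmIs0.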
1990 template <unsigned ImmIs0, unsigned ImmIs1>
1991 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 1 && "Invalid number of operands!");
1993 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1994 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1995 }
1996
1997 void addImmOperands(MCInst &Inst, unsigned N) const {
1998 assert(N == 1 && "Invalid number of operands!");
1999 // If this is a pageoff symrefexpr with an addend, adjust the addend
2000 // to be only the page-offset portion. Otherwise, just add the expr
2001 // as-is.
2002 addExpr(Inst, getImm());
2003 }
2004
2005 template <int Shift>
2006 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2007 assert(N == 2 && "Invalid number of operands!");
2008 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2009 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
2010 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2011 } else if (isShiftedImm()) {
2012 addExpr(Inst, getShiftedImmVal());
2013 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
2014 } else {
2015 addExpr(Inst, getImm());
2016 Inst.addOperand(MCOperand::createImm(0));
2017 }
2018 }
2019
2020 template <int Shift>
2021 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2022 assert(N == 2 && "Invalid number of operands!");
2023 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2024 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2025 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2026 } else
2027 llvm_unreachable("Not a shifted negative immediate");
2028 }
2029
2030 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2031 assert(N == 1 && "Invalid number of operands!");
2032 Inst.addOperand(MCOperand::createImm(getCondCode()));
2033 }
2034
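// For a constant ADRP target, the immediate encodes a 4 KiB page index, hence
// the shift right by 12; non-constant expressions are left for the fixup to
// resolve.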
2035 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2036 assert(N == 1 && "Invalid number of operands!");
2037 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2038 if (!MCE)
2039 addExpr(Inst, getImm());
2040 else
2041 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2042 }
2043
2044 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2045 addImmOperands(Inst, N);
2046 }
2047
2048 template<int Scale>
2049 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!");
2051 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2052
2053 if (!MCE) {
2054 Inst.addOperand(MCOperand::createExpr(getImm()));
2055 return;
2056 }
2057 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2058 }
2059
2060 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2061 assert(N == 1 && "Invalid number of operands!");
2062 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2063 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2064 }
2065
2066 template <int Scale>
2067 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2070 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2071 }
2072
2073 template <int Scale>
2074 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2075 assert(N == 1 && "Invalid number of operands!");
2076 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2077 }
2078
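// Logical (bitmask) immediates are emitted in the packed N:immr:imms form
// expected by the instruction encoders; encodeLogicalImmediate performs the
// packing for the operand's register width (sizeof(T) * 8 bits).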
2079 template <typename T>
2080 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2081 assert(N == 1 && "Invalid number of operands!");
2082 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2083 std::make_unsigned_t<T> Val = MCE->getValue();
2084 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2085 Inst.addOperand(MCOperand::createImm(encoding));
2086 }
2087
2088 template <typename T>
2089 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2092 std::make_unsigned_t<T> Val = ~MCE->getValue();
2093 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2094 Inst.addOperand(MCOperand::createImm(encoding));
2095 }
2096
2097 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2098 assert(N == 1 && "Invalid number of operands!");
2099 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2100 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2101 Inst.addOperand(MCOperand::createImm(encoding));
2102 }
2103
2104 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2105 // Branch operands don't encode the low bits, so shift them off
2106 // here. If it's a label, however, just put it on directly as there's
2107 // not enough information now to do anything.
2108 assert(N == 1 && "Invalid number of operands!");
2109 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2110 if (!MCE) {
2111 addExpr(Inst, getImm());
2112 return;
2113 }
2114 assert(MCE && "Invalid constant immediate operand!");
2115 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2116 }
2117
2118 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2119 // PC-relative operands don't encode the low bits, so shift them off
2120 // here. If it's a label, however, just put it on directly as there's
2121 // not enough information now to do anything.
2122 assert(N == 1 && "Invalid number of operands!");
2123 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2124 if (!MCE) {
2125 addExpr(Inst, getImm());
2126 return;
2127 }
2128 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2129 }
2130
2131 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2132 // Branch operands don't encode the low bits, so shift them off
2133 // here. If it's a label, however, just put it on directly as there's
2134 // not enough information now to do anything.
2135 assert(N == 1 && "Invalid number of operands!");
2136 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2137 if (!MCE) {
2138 addExpr(Inst, getImm());
2139 return;
2140 }
2141 assert(MCE && "Invalid constant immediate operand!");
2142 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2143 }
2144
2145 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2146 // Branch operands don't encode the low bits, so shift them off
2147 // here. If it's a label, however, just put it on directly as there's
2148 // not enough information now to do anything.
2149 assert(N == 1 && "Invalid number of operands!");
2150 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2151 if (!MCE) {
2152 addExpr(Inst, getImm());
2153 return;
2154 }
2155 assert(MCE && "Invalid constant immediate operand!");
2156 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2157 }
2158
2159 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2160 // Branch operands don't encode the low bits, so shift them off
2161 // here. If it's a label, however, just put it on directly as there's
2162 // not enough information now to do anything.
2163 assert(N == 1 && "Invalid number of operands!");
2164 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2165 if (!MCE) {
2166 addExpr(Inst, getImm());
2167 return;
2168 }
2169 assert(MCE && "Invalid constant immediate operand!");
2170 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2171 }
2172
2173 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175 Inst.addOperand(MCOperand::createImm(
2176 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2177 }
2178
2179 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(MCOperand::createImm(getBarrier()));
2182 }
2183
2184 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(MCOperand::createImm(getBarrier()));
2187 }
2188
2189 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191
2192 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2193 }
2194
2195 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 1 && "Invalid number of operands!");
2197
2198 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2199 }
2200
2201 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2202 assert(N == 1 && "Invalid number of operands!");
2203
2204 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2205 }
2206
2207 void addSVCROperands(MCInst &Inst, unsigned N) const {
2208 assert(N == 1 && "Invalid number of operands!");
2209
2210 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2211 }
2212
2213 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2214 assert(N == 1 && "Invalid number of operands!");
2215
2216 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2217 }
2218
2219 void addSysCROperands(MCInst &Inst, unsigned N) const {
2220 assert(N == 1 && "Invalid number of operands!");
2221 Inst.addOperand(MCOperand::createImm(getSysCR()));
2222 }
2223
2224 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2225 assert(N == 1 && "Invalid number of operands!");
2226 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2227 }
2228
2229 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 1 && "Invalid number of operands!");
2231 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2232 }
2233
2234 void addPHintOperands(MCInst &Inst, unsigned N) const {
2235 assert(N == 1 && "Invalid number of operands!");
2236 Inst.addOperand(MCOperand::createImm(getPHint()));
2237 }
2238
2239 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2240 assert(N == 1 && "Invalid number of operands!");
2241 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2242 }
2243
2244 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2245 assert(N == 1 && "Invalid number of operands!");
2246 Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
2247 }
2248
2249 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2250 assert(N == 1 && "Invalid number of operands!");
2251 Inst.addOperand(MCOperand::createImm(getTIndexHint()));
2252 }
2253
2254 void addShifterOperands(MCInst &Inst, unsigned N) const {
2255 assert(N == 1 && "Invalid number of operands!");
2256 unsigned Imm =
2257 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2258 Inst.addOperand(MCOperand::createImm(Imm));
2259 }
2260
2261 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2262 assert(N == 1 && "Invalid number of operands!");
2263 unsigned Imm = getShiftExtendAmount();
2264 Inst.addOperand(MCOperand::createImm(Imm));
2265 }
2266
2267 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2268 assert(N == 1 && "Invalid number of operands!");
2269
2270 if (!isScalarReg())
2271 return;
2272
2273 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2274 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2275 .getRegister(RI->getEncodingValue(getReg()));
2276 if (Reg != AArch64::XZR)
2277 llvm_unreachable("wrong register");
2278
2279 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2280 }
2281
2282 void addExtendOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2285 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2286 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2287 Inst.addOperand(MCOperand::createImm(Imm));
2288 }
2289
2290 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2293 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2294 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2295 Inst.addOperand(MCOperand::createImm(Imm));
2296 }
2297
2298 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2299 assert(N == 2 && "Invalid number of operands!");
2300 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2301 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2302 Inst.addOperand(MCOperand::createImm(IsSigned));
2303 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2304 }
2305
2306 // For 8-bit load/store instructions with a register offset, both the
2307 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2308 // they're disambiguated by whether the shift was explicit or implicit rather
2309 // than its size.
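// For example, "ldrb w0, [x1, x2, lsl #0]" selects the "DoShift" form, while
// "ldrb w0, [x1, x2]" selects "NoShift", although both shift by zero.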
2310 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2311 assert(N == 2 && "Invalid number of operands!");
2312 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2313 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2314 Inst.addOperand(MCOperand::createImm(IsSigned));
2315 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2316 }
2317
2318 template<int Shift>
2319 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2320 assert(N == 1 && "Invalid number of operands!");
2321
2322 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2323 if (CE) {
2324 uint64_t Value = CE->getValue();
2325 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2326 } else {
2327 addExpr(Inst, getImm());
2328 }
2329 }
2330
2331 template<int Shift>
2332 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2333 assert(N == 1 && "Invalid number of operands!");
2334
2335 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2336 uint64_t Value = CE->getValue();
2337 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2338 }
2339
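// Complex rotation immediates are written in degrees and encoded as an index:
// the "even" form accepts 0/90/180/270 (divide by 90), the "odd" form accepts
// 90/270 (subtract 90, divide by 180).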
2340 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2341 assert(N == 1 && "Invalid number of operands!");
2342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2343 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2344 }
2345
2346 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2347 assert(N == 1 && "Invalid number of operands!");
2348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2349 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2350 }
2351
2352 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2353
2354 static std::unique_ptr<AArch64Operand>
2355 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2356 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2357 Op->Tok.Data = Str.data();
2358 Op->Tok.Length = Str.size();
2359 Op->Tok.IsSuffix = IsSuffix;
2360 Op->StartLoc = S;
2361 Op->EndLoc = S;
2362 return Op;
2363 }
2364
2365 static std::unique_ptr<AArch64Operand>
2366 CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2367 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2368 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2369 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2370 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2371 Op->Reg.Reg = Reg;
2372 Op->Reg.Kind = Kind;
2373 Op->Reg.ElementWidth = 0;
2374 Op->Reg.EqualityTy = EqTy;
2375 Op->Reg.ShiftExtend.Type = ExtTy;
2376 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2377 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2378 Op->StartLoc = S;
2379 Op->EndLoc = E;
2380 return Op;
2381 }
2382
2383 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2384 MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
2385 MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2386 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2387 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2388 Kind == RegKind::SVEPredicateVector ||
2389 Kind == RegKind::SVEPredicateAsCounter) &&
2390 "Invalid vector kind");
2391 auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2392 HasExplicitAmount);
2393 Op->Reg.ElementWidth = ElementWidth;
2394 return Op;
2395 }
2396
2397 static std::unique_ptr<AArch64Operand>
2398 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2399 unsigned NumElements, unsigned ElementWidth,
2400 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2401 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2402 Op->VectorList.Reg = Reg;
2403 Op->VectorList.Count = Count;
2404 Op->VectorList.Stride = Stride;
2405 Op->VectorList.NumElements = NumElements;
2406 Op->VectorList.ElementWidth = ElementWidth;
2407 Op->VectorList.RegisterKind = RegisterKind;
2408 Op->StartLoc = S;
2409 Op->EndLoc = E;
2410 return Op;
2411 }
2412
2413 static std::unique_ptr<AArch64Operand>
2414 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2415 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2416 Op->VectorIndex.Val = Idx;
2417 Op->StartLoc = S;
2418 Op->EndLoc = E;
2419 return Op;
2420 }
2421
2422 static std::unique_ptr<AArch64Operand>
2423 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2424 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2425 Op->MatrixTileList.RegMask = RegMask;
2426 Op->StartLoc = S;
2427 Op->EndLoc = E;
2428 return Op;
2429 }
2430
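// Expands an SME ZA tile of the given element width into the set of 64-bit
// ZAD tiles it overlaps, e.g. za1.s covers za1.d and za5.d.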
2431 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2432 const unsigned ElementWidth) {
2433 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2434 RegMap = {
2435 {{0, AArch64::ZAB0},
2436 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2437 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2438 {{8, AArch64::ZAB0},
2439 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2440 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2441 {{16, AArch64::ZAH0},
2442 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2443 {{16, AArch64::ZAH1},
2444 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2445 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2446 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2447 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2448 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2449 };
2450
2451 if (ElementWidth == 64)
2452 OutRegs.insert(Reg);
2453 else {
2454 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2455 assert(!Regs.empty() && "Invalid tile or element width!");
2456 OutRegs.insert_range(Regs);
2457 }
2458 }
2459
2460 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2461 SMLoc E, MCContext &Ctx) {
2462 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2463 Op->Imm.Val = Val;
2464 Op->StartLoc = S;
2465 Op->EndLoc = E;
2466 return Op;
2467 }
2468
2469 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2470 unsigned ShiftAmount,
2471 SMLoc S, SMLoc E,
2472 MCContext &Ctx) {
2473 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2474 Op->ShiftedImm.Val = Val;
2475 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2476 Op->StartLoc = S;
2477 Op->EndLoc = E;
2478 return Op;
2479 }
2480
2481 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2482 unsigned Last, SMLoc S,
2483 SMLoc E,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2486 Op->ImmRange.First = First;
2487 Op->ImmRange.Last = Last;
2488 Op->EndLoc = E;
2489 return Op;
2490 }
2491
2492 static std::unique_ptr<AArch64Operand>
2493 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2495 Op->CondCode.Code = Code;
2496 Op->StartLoc = S;
2497 Op->EndLoc = E;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand>
2502 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2504 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2505 Op->FPImm.IsExact = IsExact;
2506 Op->StartLoc = S;
2507 Op->EndLoc = S;
2508 return Op;
2509 }
2510
2511 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2512 StringRef Str,
2513 SMLoc S,
2514 MCContext &Ctx,
2515 bool HasnXSModifier) {
2516 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2517 Op->Barrier.Val = Val;
2518 Op->Barrier.Data = Str.data();
2519 Op->Barrier.Length = Str.size();
2520 Op->Barrier.HasnXSModifier = HasnXSModifier;
2521 Op->StartLoc = S;
2522 Op->EndLoc = S;
2523 return Op;
2524 }
2525
2526 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2527 uint32_t MRSReg,
2528 uint32_t MSRReg,
2529 uint32_t PStateField,
2530 MCContext &Ctx) {
2531 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2532 Op->SysReg.Data = Str.data();
2533 Op->SysReg.Length = Str.size();
2534 Op->SysReg.MRSReg = MRSReg;
2535 Op->SysReg.MSRReg = MSRReg;
2536 Op->SysReg.PStateField = PStateField;
2537 Op->StartLoc = S;
2538 Op->EndLoc = S;
2539 return Op;
2540 }
2541
2542 static std::unique_ptr<AArch64Operand>
2543 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2544 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2545 Op->PHint.Val = Val;
2546 Op->PHint.Data = Str.data();
2547 Op->PHint.Length = Str.size();
2548 Op->StartLoc = S;
2549 Op->EndLoc = S;
2550 return Op;
2551 }
2552
2553 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2554 SMLoc E, MCContext &Ctx) {
2555 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2556 Op->SysCRImm.Val = Val;
2557 Op->StartLoc = S;
2558 Op->EndLoc = E;
2559 return Op;
2560 }
2561
2562 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2563 StringRef Str,
2564 SMLoc S,
2565 MCContext &Ctx) {
2566 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2567 Op->Prefetch.Val = Val;
2568 Op->Barrier.Data = Str.data();
2569 Op->Barrier.Length = Str.size();
2570 Op->StartLoc = S;
2571 Op->EndLoc = S;
2572 return Op;
2573 }
2574
2575 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2576 StringRef Str,
2577 SMLoc S,
2578 MCContext &Ctx) {
2579 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2580 Op->PSBHint.Val = Val;
2581 Op->PSBHint.Data = Str.data();
2582 Op->PSBHint.Length = Str.size();
2583 Op->StartLoc = S;
2584 Op->EndLoc = S;
2585 return Op;
2586 }
2587
2588 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2589 StringRef Str,
2590 SMLoc S,
2591 MCContext &Ctx) {
2592 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2593 Op->BTIHint.Val = Val | 32;
2594 Op->BTIHint.Data = Str.data();
2595 Op->BTIHint.Length = Str.size();
2596 Op->StartLoc = S;
2597 Op->EndLoc = S;
2598 return Op;
2599 }
2600
2601 static std::unique_ptr<AArch64Operand>
2602 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2603 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2604 Op->CMHPriorityHint.Val = Val;
2605 Op->CMHPriorityHint.Data = Str.data();
2606 Op->CMHPriorityHint.Length = Str.size();
2607 Op->StartLoc = S;
2608 Op->EndLoc = S;
2609 return Op;
2610 }
2611
2612 static std::unique_ptr<AArch64Operand>
2613 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2614 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2615 Op->TIndexHint.Val = Val;
2616 Op->TIndexHint.Data = Str.data();
2617 Op->TIndexHint.Length = Str.size();
2618 Op->StartLoc = S;
2619 Op->EndLoc = S;
2620 return Op;
2621 }
2622
2623 static std::unique_ptr<AArch64Operand>
2624 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2625 SMLoc S, SMLoc E, MCContext &Ctx) {
2626 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2627 Op->MatrixReg.Reg = Reg;
2628 Op->MatrixReg.ElementWidth = ElementWidth;
2629 Op->MatrixReg.Kind = Kind;
2630 Op->StartLoc = S;
2631 Op->EndLoc = E;
2632 return Op;
2633 }
2634
2635 static std::unique_ptr<AArch64Operand>
2636 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2637 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2638 Op->SVCR.PStateField = PStateField;
2639 Op->SVCR.Data = Str.data();
2640 Op->SVCR.Length = Str.size();
2641 Op->StartLoc = S;
2642 Op->EndLoc = S;
2643 return Op;
2644 }
2645
2646 static std::unique_ptr<AArch64Operand>
2647 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2648 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2649 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2650 Op->ShiftExtend.Type = ShOp;
2651 Op->ShiftExtend.Amount = Val;
2652 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2653 Op->StartLoc = S;
2654 Op->EndLoc = E;
2655 return Op;
2656 }
2657};
2658
2659} // end anonymous namespace.
2660
2661void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
2662 switch (Kind) {
2663 case k_FPImm:
2664 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2665 if (!getFPImmIsExact())
2666 OS << " (inexact)";
2667 OS << ">";
2668 break;
2669 case k_Barrier: {
2670 StringRef Name = getBarrierName();
2671 if (!Name.empty())
2672 OS << "<barrier " << Name << ">";
2673 else
2674 OS << "<barrier invalid #" << getBarrier() << ">";
2675 break;
2676 }
2677 case k_Immediate:
2678 MAI.printExpr(OS, *getImm());
2679 break;
2680 case k_ShiftedImm: {
2681 unsigned Shift = getShiftedImmShift();
2682 OS << "<shiftedimm ";
2683 MAI.printExpr(OS, *getShiftedImmVal());
2684 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2685 break;
2686 }
2687 case k_ImmRange: {
2688 OS << "<immrange ";
2689 OS << getFirstImmVal();
2690 OS << ":" << getLastImmVal() << ">";
2691 break;
2692 }
2693 case k_CondCode:
2694 OS << "<condcode " << getCondCode() << ">";
2695 break;
2696 case k_VectorList: {
2697 OS << "<vectorlist ";
2698 MCRegister Reg = getVectorListStart();
2699 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2700 OS << Reg.id() + i * getVectorListStride() << " ";
2701 OS << ">";
2702 break;
2703 }
2704 case k_VectorIndex:
2705 OS << "<vectorindex " << getVectorIndex() << ">";
2706 break;
2707 case k_SysReg:
2708 OS << "<sysreg: " << getSysReg() << '>';
2709 break;
2710 case k_Token:
2711 OS << "'" << getToken() << "'";
2712 break;
2713 case k_SysCR:
2714 OS << "c" << getSysCR();
2715 break;
2716 case k_Prefetch: {
2717 StringRef Name = getPrefetchName();
2718 if (!Name.empty())
2719 OS << "<prfop " << Name << ">";
2720 else
2721 OS << "<prfop invalid #" << getPrefetch() << ">";
2722 break;
2723 }
2724 case k_PSBHint:
2725 OS << getPSBHintName();
2726 break;
2727 case k_PHint:
2728 OS << getPHintName();
2729 break;
2730 case k_BTIHint:
2731 OS << getBTIHintName();
2732 break;
2733 case k_CMHPriorityHint:
2734 OS << getCMHPriorityHintName();
2735 break;
2736 case k_TIndexHint:
2737 OS << getTIndexHintName();
2738 break;
2739 case k_MatrixRegister:
2740 OS << "<matrix " << getMatrixReg().id() << ">";
2741 break;
2742 case k_MatrixTileList: {
2743 OS << "<matrixlist ";
2744 unsigned RegMask = getMatrixTileListRegMask();
2745 unsigned MaxBits = 8;
2746 for (unsigned I = MaxBits; I > 0; --I)
2747 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2748 OS << '>';
2749 break;
2750 }
2751 case k_SVCR: {
2752 OS << getSVCR();
2753 break;
2754 }
2755 case k_Register:
2756 OS << "<register " << getReg().id() << ">";
2757 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2758 break;
2759 [[fallthrough]];
2760 case k_ShiftExtend:
2761 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2762 << getShiftExtendAmount();
2763 if (!hasShiftExtendAmount())
2764 OS << "<imp>";
2765 OS << '>';
2766 break;
2767 }
2768}
2769
2770/// @name Auto-generated Match Functions
2771/// {
2772
2773 static unsigned MatchRegisterName(StringRef Name);
2774
2775/// }
2776
2777static unsigned MatchNeonVectorRegName(StringRef Name) {
2778 return StringSwitch<unsigned>(Name.lower())
2779 .Case("v0", AArch64::Q0)
2780 .Case("v1", AArch64::Q1)
2781 .Case("v2", AArch64::Q2)
2782 .Case("v3", AArch64::Q3)
2783 .Case("v4", AArch64::Q4)
2784 .Case("v5", AArch64::Q5)
2785 .Case("v6", AArch64::Q6)
2786 .Case("v7", AArch64::Q7)
2787 .Case("v8", AArch64::Q8)
2788 .Case("v9", AArch64::Q9)
2789 .Case("v10", AArch64::Q10)
2790 .Case("v11", AArch64::Q11)
2791 .Case("v12", AArch64::Q12)
2792 .Case("v13", AArch64::Q13)
2793 .Case("v14", AArch64::Q14)
2794 .Case("v15", AArch64::Q15)
2795 .Case("v16", AArch64::Q16)
2796 .Case("v17", AArch64::Q17)
2797 .Case("v18", AArch64::Q18)
2798 .Case("v19", AArch64::Q19)
2799 .Case("v20", AArch64::Q20)
2800 .Case("v21", AArch64::Q21)
2801 .Case("v22", AArch64::Q22)
2802 .Case("v23", AArch64::Q23)
2803 .Case("v24", AArch64::Q24)
2804 .Case("v25", AArch64::Q25)
2805 .Case("v26", AArch64::Q26)
2806 .Case("v27", AArch64::Q27)
2807 .Case("v28", AArch64::Q28)
2808 .Case("v29", AArch64::Q29)
2809 .Case("v30", AArch64::Q30)
2810 .Case("v31", AArch64::Q31)
2811 .Default(0);
2812}
2813
2814/// Returns an optional pair of (#elements, element-width) if Suffix
2815/// is a valid vector kind. Where the number of elements in a vector
2816/// or the vector width is implicit or explicitly unknown (but still a
2817/// valid suffix kind), 0 is used.
2818static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2819 RegKind VectorKind) {
2820 std::pair<int, int> Res = {-1, -1};
2821
2822 switch (VectorKind) {
2823 case RegKind::NeonVector:
2824 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2825 .Case("", {0, 0})
2826 .Case(".1d", {1, 64})
2827 .Case(".1q", {1, 128})
2828 // '.2h' needed for fp16 scalar pairwise reductions
2829 .Case(".2h", {2, 16})
2830 .Case(".2b", {2, 8})
2831 .Case(".2s", {2, 32})
2832 .Case(".2d", {2, 64})
2833 // '.4b' is another special case for the ARMv8.2a dot product
2834 // operand
2835 .Case(".4b", {4, 8})
2836 .Case(".4h", {4, 16})
2837 .Case(".4s", {4, 32})
2838 .Case(".8b", {8, 8})
2839 .Case(".8h", {8, 16})
2840 .Case(".16b", {16, 8})
2841 // Accept the width neutral ones, too, for verbose syntax. If
2842 // those aren't used in the right places, the token operand won't
2843 // match so all will work out.
2844 .Case(".b", {0, 8})
2845 .Case(".h", {0, 16})
2846 .Case(".s", {0, 32})
2847 .Case(".d", {0, 64})
2848 .Default({-1, -1});
2849 break;
2850 case RegKind::SVEPredicateAsCounter:
2851 case RegKind::SVEPredicateVector:
2852 case RegKind::SVEDataVector:
2853 case RegKind::Matrix:
2854 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2855 .Case("", {0, 0})
2856 .Case(".b", {0, 8})
2857 .Case(".h", {0, 16})
2858 .Case(".s", {0, 32})
2859 .Case(".d", {0, 64})
2860 .Case(".q", {0, 128})
2861 .Default({-1, -1});
2862 break;
2863 default:
2864 llvm_unreachable("Unsupported RegKind");
2865 }
2866
2867 if (Res == std::make_pair(-1, -1))
2868 return std::nullopt;
2869
2870 return std::optional<std::pair<int, int>>(Res);
2871}
2872
2873static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2874 return parseVectorKind(Suffix, VectorKind).has_value();
2875}
2876
2877 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2878 return StringSwitch<unsigned>(Name.lower())
2879 .Case("z0", AArch64::Z0)
2880 .Case("z1", AArch64::Z1)
2881 .Case("z2", AArch64::Z2)
2882 .Case("z3", AArch64::Z3)
2883 .Case("z4", AArch64::Z4)
2884 .Case("z5", AArch64::Z5)
2885 .Case("z6", AArch64::Z6)
2886 .Case("z7", AArch64::Z7)
2887 .Case("z8", AArch64::Z8)
2888 .Case("z9", AArch64::Z9)
2889 .Case("z10", AArch64::Z10)
2890 .Case("z11", AArch64::Z11)
2891 .Case("z12", AArch64::Z12)
2892 .Case("z13", AArch64::Z13)
2893 .Case("z14", AArch64::Z14)
2894 .Case("z15", AArch64::Z15)
2895 .Case("z16", AArch64::Z16)
2896 .Case("z17", AArch64::Z17)
2897 .Case("z18", AArch64::Z18)
2898 .Case("z19", AArch64::Z19)
2899 .Case("z20", AArch64::Z20)
2900 .Case("z21", AArch64::Z21)
2901 .Case("z22", AArch64::Z22)
2902 .Case("z23", AArch64::Z23)
2903 .Case("z24", AArch64::Z24)
2904 .Case("z25", AArch64::Z25)
2905 .Case("z26", AArch64::Z26)
2906 .Case("z27", AArch64::Z27)
2907 .Case("z28", AArch64::Z28)
2908 .Case("z29", AArch64::Z29)
2909 .Case("z30", AArch64::Z30)
2910 .Case("z31", AArch64::Z31)
2911 .Default(0);
2912}
2913
2914 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2915 return StringSwitch<unsigned>(Name.lower())
2916 .Case("p0", AArch64::P0)
2917 .Case("p1", AArch64::P1)
2918 .Case("p2", AArch64::P2)
2919 .Case("p3", AArch64::P3)
2920 .Case("p4", AArch64::P4)
2921 .Case("p5", AArch64::P5)
2922 .Case("p6", AArch64::P6)
2923 .Case("p7", AArch64::P7)
2924 .Case("p8", AArch64::P8)
2925 .Case("p9", AArch64::P9)
2926 .Case("p10", AArch64::P10)
2927 .Case("p11", AArch64::P11)
2928 .Case("p12", AArch64::P12)
2929 .Case("p13", AArch64::P13)
2930 .Case("p14", AArch64::P14)
2931 .Case("p15", AArch64::P15)
2932 .Default(0);
2933}
2934
2935 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2936 return StringSwitch<unsigned>(Name.lower())
2937 .Case("pn0", AArch64::PN0)
2938 .Case("pn1", AArch64::PN1)
2939 .Case("pn2", AArch64::PN2)
2940 .Case("pn3", AArch64::PN3)
2941 .Case("pn4", AArch64::PN4)
2942 .Case("pn5", AArch64::PN5)
2943 .Case("pn6", AArch64::PN6)
2944 .Case("pn7", AArch64::PN7)
2945 .Case("pn8", AArch64::PN8)
2946 .Case("pn9", AArch64::PN9)
2947 .Case("pn10", AArch64::PN10)
2948 .Case("pn11", AArch64::PN11)
2949 .Case("pn12", AArch64::PN12)
2950 .Case("pn13", AArch64::PN13)
2951 .Case("pn14", AArch64::PN14)
2952 .Case("pn15", AArch64::PN15)
2953 .Default(0);
2954}
2955
2956 static unsigned matchMatrixTileListRegName(StringRef Name) {
2957 return StringSwitch<unsigned>(Name.lower())
2958 .Case("za0.d", AArch64::ZAD0)
2959 .Case("za1.d", AArch64::ZAD1)
2960 .Case("za2.d", AArch64::ZAD2)
2961 .Case("za3.d", AArch64::ZAD3)
2962 .Case("za4.d", AArch64::ZAD4)
2963 .Case("za5.d", AArch64::ZAD5)
2964 .Case("za6.d", AArch64::ZAD6)
2965 .Case("za7.d", AArch64::ZAD7)
2966 .Case("za0.s", AArch64::ZAS0)
2967 .Case("za1.s", AArch64::ZAS1)
2968 .Case("za2.s", AArch64::ZAS2)
2969 .Case("za3.s", AArch64::ZAS3)
2970 .Case("za0.h", AArch64::ZAH0)
2971 .Case("za1.h", AArch64::ZAH1)
2972 .Case("za0.b", AArch64::ZAB0)
2973 .Default(0);
2974}
2975
2976static unsigned matchMatrixRegName(StringRef Name) {
2977 return StringSwitch<unsigned>(Name.lower())
2978 .Case("za", AArch64::ZA)
2979 .Case("za0.q", AArch64::ZAQ0)
2980 .Case("za1.q", AArch64::ZAQ1)
2981 .Case("za2.q", AArch64::ZAQ2)
2982 .Case("za3.q", AArch64::ZAQ3)
2983 .Case("za4.q", AArch64::ZAQ4)
2984 .Case("za5.q", AArch64::ZAQ5)
2985 .Case("za6.q", AArch64::ZAQ6)
2986 .Case("za7.q", AArch64::ZAQ7)
2987 .Case("za8.q", AArch64::ZAQ8)
2988 .Case("za9.q", AArch64::ZAQ9)
2989 .Case("za10.q", AArch64::ZAQ10)
2990 .Case("za11.q", AArch64::ZAQ11)
2991 .Case("za12.q", AArch64::ZAQ12)
2992 .Case("za13.q", AArch64::ZAQ13)
2993 .Case("za14.q", AArch64::ZAQ14)
2994 .Case("za15.q", AArch64::ZAQ15)
2995 .Case("za0.d", AArch64::ZAD0)
2996 .Case("za1.d", AArch64::ZAD1)
2997 .Case("za2.d", AArch64::ZAD2)
2998 .Case("za3.d", AArch64::ZAD3)
2999 .Case("za4.d", AArch64::ZAD4)
3000 .Case("za5.d", AArch64::ZAD5)
3001 .Case("za6.d", AArch64::ZAD6)
3002 .Case("za7.d", AArch64::ZAD7)
3003 .Case("za0.s", AArch64::ZAS0)
3004 .Case("za1.s", AArch64::ZAS1)
3005 .Case("za2.s", AArch64::ZAS2)
3006 .Case("za3.s", AArch64::ZAS3)
3007 .Case("za0.h", AArch64::ZAH0)
3008 .Case("za1.h", AArch64::ZAH1)
3009 .Case("za0.b", AArch64::ZAB0)
3010 .Case("za0h.q", AArch64::ZAQ0)
3011 .Case("za1h.q", AArch64::ZAQ1)
3012 .Case("za2h.q", AArch64::ZAQ2)
3013 .Case("za3h.q", AArch64::ZAQ3)
3014 .Case("za4h.q", AArch64::ZAQ4)
3015 .Case("za5h.q", AArch64::ZAQ5)
3016 .Case("za6h.q", AArch64::ZAQ6)
3017 .Case("za7h.q", AArch64::ZAQ7)
3018 .Case("za8h.q", AArch64::ZAQ8)
3019 .Case("za9h.q", AArch64::ZAQ9)
3020 .Case("za10h.q", AArch64::ZAQ10)
3021 .Case("za11h.q", AArch64::ZAQ11)
3022 .Case("za12h.q", AArch64::ZAQ12)
3023 .Case("za13h.q", AArch64::ZAQ13)
3024 .Case("za14h.q", AArch64::ZAQ14)
3025 .Case("za15h.q", AArch64::ZAQ15)
3026 .Case("za0h.d", AArch64::ZAD0)
3027 .Case("za1h.d", AArch64::ZAD1)
3028 .Case("za2h.d", AArch64::ZAD2)
3029 .Case("za3h.d", AArch64::ZAD3)
3030 .Case("za4h.d", AArch64::ZAD4)
3031 .Case("za5h.d", AArch64::ZAD5)
3032 .Case("za6h.d", AArch64::ZAD6)
3033 .Case("za7h.d", AArch64::ZAD7)
3034 .Case("za0h.s", AArch64::ZAS0)
3035 .Case("za1h.s", AArch64::ZAS1)
3036 .Case("za2h.s", AArch64::ZAS2)
3037 .Case("za3h.s", AArch64::ZAS3)
3038 .Case("za0h.h", AArch64::ZAH0)
3039 .Case("za1h.h", AArch64::ZAH1)
3040 .Case("za0h.b", AArch64::ZAB0)
3041 .Case("za0v.q", AArch64::ZAQ0)
3042 .Case("za1v.q", AArch64::ZAQ1)
3043 .Case("za2v.q", AArch64::ZAQ2)
3044 .Case("za3v.q", AArch64::ZAQ3)
3045 .Case("za4v.q", AArch64::ZAQ4)
3046 .Case("za5v.q", AArch64::ZAQ5)
3047 .Case("za6v.q", AArch64::ZAQ6)
3048 .Case("za7v.q", AArch64::ZAQ7)
3049 .Case("za8v.q", AArch64::ZAQ8)
3050 .Case("za9v.q", AArch64::ZAQ9)
3051 .Case("za10v.q", AArch64::ZAQ10)
3052 .Case("za11v.q", AArch64::ZAQ11)
3053 .Case("za12v.q", AArch64::ZAQ12)
3054 .Case("za13v.q", AArch64::ZAQ13)
3055 .Case("za14v.q", AArch64::ZAQ14)
3056 .Case("za15v.q", AArch64::ZAQ15)
3057 .Case("za0v.d", AArch64::ZAD0)
3058 .Case("za1v.d", AArch64::ZAD1)
3059 .Case("za2v.d", AArch64::ZAD2)
3060 .Case("za3v.d", AArch64::ZAD3)
3061 .Case("za4v.d", AArch64::ZAD4)
3062 .Case("za5v.d", AArch64::ZAD5)
3063 .Case("za6v.d", AArch64::ZAD6)
3064 .Case("za7v.d", AArch64::ZAD7)
3065 .Case("za0v.s", AArch64::ZAS0)
3066 .Case("za1v.s", AArch64::ZAS1)
3067 .Case("za2v.s", AArch64::ZAS2)
3068 .Case("za3v.s", AArch64::ZAS3)
3069 .Case("za0v.h", AArch64::ZAH0)
3070 .Case("za1v.h", AArch64::ZAH1)
3071 .Case("za0v.b", AArch64::ZAB0)
3072 .Default(0);
3073}
3074
3075bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3076 SMLoc &EndLoc) {
3077 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3078}
3079
3080ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3081 SMLoc &EndLoc) {
3082 StartLoc = getLoc();
3083 ParseStatus Res = tryParseScalarRegister(Reg);
3084 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3085 return Res;
3086}
3087
3088// Matches a register name or register alias previously defined by '.req'
3089MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3090 RegKind Kind) {
3091 MCRegister Reg = MCRegister();
3092 if ((Reg = matchSVEDataVectorRegName(Name)))
3093 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3094
3095 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3096 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3097
3098 if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
3099 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3100
3101 if ((Reg = MatchNeonVectorRegName(Name)))
3102 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3103
3104 if ((Reg = matchMatrixRegName(Name)))
3105 return Kind == RegKind::Matrix ? Reg : MCRegister();
3106
3107 if (Name.equals_insensitive("zt0"))
3108 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3109
3110 // The parsed register must be of RegKind Scalar
3111 if ((Reg = MatchRegisterName(Name)))
3112 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3113
3114 if (!Reg) {
3115 // Handle a few common aliases of registers.
3116 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3117 .Case("fp", AArch64::FP)
3118 .Case("lr", AArch64::LR)
3119 .Case("x31", AArch64::XZR)
3120 .Case("w31", AArch64::WZR)
3121 .Default(0))
3122 return Kind == RegKind::Scalar ? Reg : MCRegister();
3123
3124 // Check for aliases registered via .req. Canonicalize to lower case.
3125 // That's more consistent since register names are case insensitive, and
3126 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3127 auto Entry = RegisterReqs.find(Name.lower());
3128 if (Entry == RegisterReqs.end())
3129 return MCRegister();
3130
3131 // set Reg if the match is the right kind of register
3132 if (Kind == Entry->getValue().first)
3133 Reg = Entry->getValue().second;
3134 }
3135 return Reg;
3136}
3137
3138unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3139 switch (K) {
3140 case RegKind::Scalar:
3141 case RegKind::NeonVector:
3142 case RegKind::SVEDataVector:
3143 return 32;
3144 case RegKind::Matrix:
3145 case RegKind::SVEPredicateVector:
3146 case RegKind::SVEPredicateAsCounter:
3147 return 16;
3148 case RegKind::LookupTable:
3149 return 1;
3150 }
3151 llvm_unreachable("Unsupported RegKind");
3152}
3153
3154/// tryParseScalarRegister - Try to parse a register name. The token must be an
3155/// Identifier when called, and if it is a register name the token is eaten and
3156/// the register is added to the operand list.
3157ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3158 const AsmToken &Tok = getTok();
3159 if (Tok.isNot(AsmToken::Identifier))
3160 return ParseStatus::NoMatch;
3161
3162 std::string lowerCase = Tok.getString().lower();
3163 MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3164 if (!Reg)
3165 return ParseStatus::NoMatch;
3166
3167 RegNum = Reg;
3168 Lex(); // Eat identifier token.
3169 return ParseStatus::Success;
3170}
3171
3172/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3173ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3174 SMLoc S = getLoc();
3175
3176 if (getTok().isNot(AsmToken::Identifier))
3177 return Error(S, "Expected cN operand where 0 <= N <= 15");
3178
3179 StringRef Tok = getTok().getIdentifier();
3180 if (Tok[0] != 'c' && Tok[0] != 'C')
3181 return Error(S, "Expected cN operand where 0 <= N <= 15");
3182
3183 uint32_t CRNum;
3184 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3185 if (BadNum || CRNum > 15)
3186 return Error(S, "Expected cN operand where 0 <= N <= 15");
3187
3188 Lex(); // Eat identifier token.
3189 Operands.push_back(
3190 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3191 return ParseStatus::Success;
3192}
3193
3194// Either an identifier for named values or a 6-bit immediate.
3195ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3196 SMLoc S = getLoc();
3197 const AsmToken &Tok = getTok();
3198
3199 unsigned MaxVal = 63;
3200
3201 // Immediate case, with optional leading hash:
3202 if (parseOptionalToken(AsmToken::Hash) ||
3203 Tok.is(AsmToken::Integer)) {
3204 const MCExpr *ImmVal;
3205 if (getParser().parseExpression(ImmVal))
3206 return ParseStatus::Failure;
3207
3208 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3209 if (!MCE)
3210 return TokError("immediate value expected for prefetch operand");
3211 unsigned prfop = MCE->getValue();
3212 if (prfop > MaxVal)
3213 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3214 "] expected");
3215
3216 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3217 Operands.push_back(AArch64Operand::CreatePrefetch(
3218 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3219 return ParseStatus::Success;
3220 }
3221
3222 if (Tok.isNot(AsmToken::Identifier))
3223 return TokError("prefetch hint expected");
3224
3225 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3226 if (!RPRFM)
3227 return TokError("prefetch hint expected");
3228
3229 Operands.push_back(AArch64Operand::CreatePrefetch(
3230 RPRFM->Encoding, Tok.getString(), S, getContext()));
3231 Lex(); // Eat identifier token.
3232 return ParseStatus::Success;
3233}
3234
3235/// tryParsePrefetch - Try to parse a prefetch operand.
3236template <bool IsSVEPrefetch>
3237ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3238 SMLoc S = getLoc();
3239 const AsmToken &Tok = getTok();
3240
3241 auto LookupByName = [](StringRef N) {
3242 if (IsSVEPrefetch) {
3243 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3244 return std::optional<unsigned>(Res->Encoding);
3245 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3246 return std::optional<unsigned>(Res->Encoding);
3247 return std::optional<unsigned>();
3248 };
3249
3250 auto LookupByEncoding = [](unsigned E) {
3251 if (IsSVEPrefetch) {
3252 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3253 return std::optional<StringRef>(Res->Name);
3254 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3255 return std::optional<StringRef>(Res->Name);
3256 return std::optional<StringRef>();
3257 };
3258 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3259
3260 // Either an identifier for named values or a 5-bit immediate.
3261 // Eat optional hash.
3262 if (parseOptionalToken(AsmToken::Hash) ||
3263 Tok.is(AsmToken::Integer)) {
3264 const MCExpr *ImmVal;
3265 if (getParser().parseExpression(ImmVal))
3266 return ParseStatus::Failure;
3267
3268 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3269 if (!MCE)
3270 return TokError("immediate value expected for prefetch operand");
3271 unsigned prfop = MCE->getValue();
3272 if (prfop > MaxVal)
3273 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3274 "] expected");
3275
3276 auto PRFM = LookupByEncoding(MCE->getValue());
3277 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3278 S, getContext()));
3279 return ParseStatus::Success;
3280 }
3281
3282 if (Tok.isNot(AsmToken::Identifier))
3283 return TokError("prefetch hint expected");
3284
3285 auto PRFM = LookupByName(Tok.getString());
3286 if (!PRFM)
3287 return TokError("prefetch hint expected");
3288
3289 Operands.push_back(AArch64Operand::CreatePrefetch(
3290 *PRFM, Tok.getString(), S, getContext()));
3291 Lex(); // Eat identifier token.
3292 return ParseStatus::Success;
3293}
3294
3295/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3296ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3297 SMLoc S = getLoc();
3298 const AsmToken &Tok = getTok();
3299 if (Tok.isNot(AsmToken::Identifier))
3300 return TokError("invalid operand for instruction");
3301
3302 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3303 if (!PSB)
3304 return TokError("invalid operand for instruction");
3305
3306 Operands.push_back(AArch64Operand::CreatePSBHint(
3307 PSB->Encoding, Tok.getString(), S, getContext()));
3308 Lex(); // Eat identifier token.
3309 return ParseStatus::Success;
3310}
3311
3312ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3313 SMLoc StartLoc = getLoc();
3314
3315 MCRegister RegNum;
3316
3317 // The case where xzr, xzr is not present is handled by an InstAlias.
3318
3319 auto RegTok = getTok(); // in case we need to backtrack
3320 if (!tryParseScalarRegister(RegNum).isSuccess())
3321 return ParseStatus::NoMatch;
3322
3323 if (RegNum != AArch64::XZR) {
3324 getLexer().UnLex(RegTok);
3325 return ParseStatus::NoMatch;
3326 }
3327
3328 if (parseComma())
3329 return ParseStatus::Failure;
3330
3331 if (!tryParseScalarRegister(RegNum).isSuccess())
3332 return TokError("expected register operand");
3333
3334 if (RegNum != AArch64::XZR)
3335 return TokError("xzr must be followed by xzr");
3336
3337 // We need to push something, since we claim this is an operand in .td.
3338 // See also AArch64AsmParser::parseKeywordOperand.
3339 Operands.push_back(AArch64Operand::CreateReg(
3340 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3341
3342 return ParseStatus::Success;
3343}
3344
3345/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3346ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3347 SMLoc S = getLoc();
3348 const AsmToken &Tok = getTok();
3349 if (Tok.isNot(AsmToken::Identifier))
3350 return TokError("invalid operand for instruction");
3351
3352 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3353 if (!BTI)
3354 return TokError("invalid operand for instruction");
3355
3356 Operands.push_back(AArch64Operand::CreateBTIHint(
3357 BTI->Encoding, Tok.getString(), S, getContext()));
3358 Lex(); // Eat identifier token.
3359 return ParseStatus::Success;
3360}
3361
3362/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3363ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3364 SMLoc S = getLoc();
3365 const AsmToken &Tok = getTok();
3366 if (Tok.isNot(AsmToken::Identifier))
3367 return TokError("invalid operand for instruction");
3368
3369 auto CMHPriority =
3370 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3371 if (!CMHPriority)
3372 return TokError("invalid operand for instruction");
3373
3374 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3375 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3376 Lex(); // Eat identifier token.
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseTIndexHint - Try to parse a TIndex operand
3381ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3382 SMLoc S = getLoc();
3383 const AsmToken &Tok = getTok();
3384 if (Tok.isNot(AsmToken::Identifier))
3385 return TokError("invalid operand for instruction");
3386
3387 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
3388 if (!TIndex)
3389 return TokError("invalid operand for instruction");
3390
3391 Operands.push_back(AArch64Operand::CreateTIndexHint(
3392 TIndex->Encoding, Tok.getString(), S, getContext()));
3393 Lex(); // Eat identifier token.
3394 return ParseStatus::Success;
3395}
3396
3397/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3398/// instruction.
3399ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3400 SMLoc S = getLoc();
3401 const MCExpr *Expr = nullptr;
3402
3403 if (getTok().is(AsmToken::Hash)) {
3404 Lex(); // Eat hash token.
3405 }
3406
3407 if (parseSymbolicImmVal(Expr))
3408 return ParseStatus::Failure;
3409
3410 AArch64::Specifier ELFSpec;
3411 AArch64::Specifier DarwinSpec;
3412 int64_t Addend;
3413 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3414 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3415 // No modifier was specified at all; this is the syntax for an ELF basic
3416 // ADRP relocation (unfortunately).
3417 Expr =
3418 MCSpecifierExpr::create(Expr, AArch64::S_ABS_PAGE, getContext());
3419 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3420 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3421 Addend != 0) {
3422 return Error(S, "gotpage label reference not allowed an addend");
3423 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3424 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3425 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3426 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3427 ELFSpec != AArch64::S_GOT_PAGE &&
3428 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3429 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3430 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3431 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3432 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3433 // The operand must be an @page or @gotpage qualified symbolref.
3434 return Error(S, "page or gotpage label reference expected");
3435 }
3436 }
3437
3438 // We have either a label reference possibly with addend or an immediate. The
3439 // addend is a raw value here. The linker will adjust it to only reference the
3440 // page.
3441 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3442 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3443
3444 return ParseStatus::Success;
3445}
3446
3447/// tryParseAdrLabel - Parse and validate a source label for the ADR
3448/// instruction.
3449ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3450 SMLoc S = getLoc();
3451 const MCExpr *Expr = nullptr;
3452
3453 // Leave anything with a bracket to the default for SVE
3454 if (getTok().is(AsmToken::LBrac))
3455 return ParseStatus::NoMatch;
3456
3457 if (getTok().is(AsmToken::Hash))
3458 Lex(); // Eat hash token.
3459
3460 if (parseSymbolicImmVal(Expr))
3461 return ParseStatus::Failure;
3462
3463 AArch64::Specifier ELFSpec;
3464 AArch64::Specifier DarwinSpec;
3465 int64_t Addend;
3466 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3467 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3468 // No modifier was specified at all; this is the syntax for an ELF basic
3469 // ADR relocation (unfortunately).
3469 Expr =
3470 MCSpecifierExpr::create(Expr, AArch64::S_ABS, getContext());
3471 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3472 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3473 // adr. It's not actually GOT entry page address but the GOT address
3474 // itself - we just share the same variant kind with :got_auth: operator
3475 // applied for adrp.
3476 // TODO: can we somehow get current TargetMachine object to call
3477 // getCodeModel() on it to ensure we are using tiny code model?
3478 return Error(S, "unexpected adr label");
3479 }
3480 }
3481
3482 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3483 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3484 return ParseStatus::Success;
3485}
3486
3487/// tryParseFPImm - A floating point immediate expression operand.
3488template <bool AddFPZeroAsLiteral>
3489ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3490 SMLoc S = getLoc();
3491
3492 bool Hash = parseOptionalToken(AsmToken::Hash);
3493
3494 // Handle negation, as that still comes through as a separate token.
3495 bool isNegative = parseOptionalToken(AsmToken::Minus);
3496
3497 const AsmToken &Tok = getTok();
3498 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3499 if (!Hash)
3500 return ParseStatus::NoMatch;
3501 return TokError("invalid floating point immediate");
3502 }
3503
3504 // Parse hexadecimal representation.
3505 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3506 if (Tok.getIntVal() > 255 || isNegative)
3507 return TokError("encoded floating point value out of range");
3508
3509 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3510 Operands.push_back(
3511 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3512 } else {
3513 // Parse FP representation.
3514 APFloat RealVal(APFloat::IEEEdouble());
3515 auto StatusOrErr =
3516 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3517 if (errorToBool(StatusOrErr.takeError()))
3518 return TokError("invalid floating point representation");
3519
3520 if (isNegative)
3521 RealVal.changeSign();
3522
3523 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3524 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3525 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3526 } else
3527 Operands.push_back(AArch64Operand::CreateFPImm(
3528 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3529 }
3530
3531 Lex(); // Eat the token.
3532
3533 return ParseStatus::Success;
3534}
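// For example (illustrative), tryParseFPImm above handles "fmov d0, #1.0" via
// the decimal path, while "fmov s0, #0x70" supplies the raw 8-bit encoded
// immediate (values above 255 are rejected). With AddFPZeroAsLiteral, a
// "#0.0" operand is emitted as the two literal tokens "#0" and ".0".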
3535
3536/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3537/// a shift suffix, for example '#1, lsl #12'.
3538ParseStatus
3539AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3540 SMLoc S = getLoc();
3541
3542 if (getTok().is(AsmToken::Hash))
3543 Lex(); // Eat '#'
3544 else if (getTok().isNot(AsmToken::Integer))
3545 // The operand must start with '#' or be an integer; otherwise it is not a match.
3546 return ParseStatus::NoMatch;
3547
3548 if (getTok().is(AsmToken::Integer) &&
3549 getLexer().peekTok().is(AsmToken::Colon))
3550 return tryParseImmRange(Operands);
3551
3552 const MCExpr *Imm = nullptr;
3553 if (parseSymbolicImmVal(Imm))
3554 return ParseStatus::Failure;
3555 else if (getTok().isNot(AsmToken::Comma)) {
3556 Operands.push_back(
3557 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3558 return ParseStatus::Success;
3559 }
3560
3561 // Eat ','
3562 Lex();
3563 StringRef VecGroup;
3564 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3565 Operands.push_back(
3566 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3567 Operands.push_back(
3568 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3569 return ParseStatus::Success;
3570 }
3571
3572 // The optional operand must be "lsl #N" where N is non-negative.
3573 if (!getTok().is(AsmToken::Identifier) ||
3574 !getTok().getIdentifier().equals_insensitive("lsl"))
3575 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3576
3577 // Eat 'lsl'
3578 Lex();
3579
3580 parseOptionalToken(AsmToken::Hash);
3581
3582 if (getTok().isNot(AsmToken::Integer))
3583 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3584
3585 int64_t ShiftAmount = getTok().getIntVal();
3586
3587 if (ShiftAmount < 0)
3588 return Error(getLoc(), "positive shift amount required");
3589 Lex(); // Eat the number
3590
3591 // Just in case the optional lsl #0 is used for immediates other than zero.
3592 if (ShiftAmount == 0 && Imm != nullptr) {
3593 Operands.push_back(
3594 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3595 return ParseStatus::Success;
3596 }
3597
3598 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3599 getLoc(), getContext()));
3600 return ParseStatus::Success;
3601}
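// Typical operands for tryParseImmWithOptionalShift above (illustrative):
// "#1" (plain immediate), "#1, lsl #12" (shifted immediate, as used by
// ADD/SUB aliases), an immediate range such as "0:1" (delegated to
// tryParseImmRange), and an immediate followed by a vector-group suffix such
// as "#0, vgx2" for SME.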
3602
3603/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3604/// suggestion to help common typos.
3605 AArch64CC::CondCode
3606 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3607 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3608 .Case("eq", AArch64CC::EQ)
3609 .Case("ne", AArch64CC::NE)
3610 .Case("cs", AArch64CC::HS)
3611 .Case("hs", AArch64CC::HS)
3612 .Case("cc", AArch64CC::LO)
3613 .Case("lo", AArch64CC::LO)
3614 .Case("mi", AArch64CC::MI)
3615 .Case("pl", AArch64CC::PL)
3616 .Case("vs", AArch64CC::VS)
3617 .Case("vc", AArch64CC::VC)
3618 .Case("hi", AArch64CC::HI)
3619 .Case("ls", AArch64CC::LS)
3620 .Case("ge", AArch64CC::GE)
3621 .Case("lt", AArch64CC::LT)
3622 .Case("gt", AArch64CC::GT)
3623 .Case("le", AArch64CC::LE)
3624 .Case("al", AArch64CC::AL)
3625 .Case("nv", AArch64CC::NV)
3626 // SVE condition code aliases:
3627 .Case("none", AArch64CC::EQ)
3628 .Case("any", AArch64CC::NE)
3629 .Case("nlast", AArch64CC::HS)
3630 .Case("last", AArch64CC::LO)
3631 .Case("first", AArch64CC::MI)
3632 .Case("nfrst", AArch64CC::PL)
3633 .Case("pmore", AArch64CC::HI)
3634 .Case("plast", AArch64CC::LS)
3635 .Case("tcont", AArch64CC::GE)
3636 .Case("tstop", AArch64CC::LT)
3637 .Default(AArch64CC::Invalid);
3638
3639 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3640 Suggestion = "nfrst";
3641
3642 return CC;
3643}
3644
3645/// parseCondCode - Parse a Condition Code operand.
3646bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3647 bool invertCondCode) {
3648 SMLoc S = getLoc();
3649 const AsmToken &Tok = getTok();
3650 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3651
3652 StringRef Cond = Tok.getString();
3653 std::string Suggestion;
3654 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3655 if (CC == AArch64CC::Invalid) {
3656 std::string Msg = "invalid condition code";
3657 if (!Suggestion.empty())
3658 Msg += ", did you mean " + Suggestion + "?";
3659 return TokError(Msg);
3660 }
3661 Lex(); // Eat identifier token.
3662
3663 if (invertCondCode) {
3664 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3665 return TokError("condition codes AL and NV are invalid for this instruction");
3666 CC = AArch64CC::getInvertedCondCode(CC);
3667 }
3668
3669 Operands.push_back(
3670 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3671 return false;
3672}
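// Illustrative uses of parseCondCode above: "b.eq label" and the SVE alias
// "b.none label" both yield AArch64CC::EQ. Alias forms that pass
// invertCondCode (e.g. "cinc w0, w1, ne") store the inverted code and reject
// AL/NV.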
3673
3674ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3675 const AsmToken &Tok = getTok();
3676 SMLoc S = getLoc();
3677
3678 if (Tok.isNot(AsmToken::Identifier))
3679 return TokError("invalid operand for instruction");
3680
3681 unsigned PStateImm = -1;
3682 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3683 if (!SVCR)
3684 return ParseStatus::NoMatch;
3685 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3686 PStateImm = SVCR->Encoding;
3687
3688 Operands.push_back(
3689 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3690 Lex(); // Eat identifier token.
3691 return ParseStatus::Success;
3692}
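// SVCR operands parsed above name the streaming-mode/ZA state being toggled,
// e.g. "sm" and "za" in "smstart sm" / "smstart za" (illustrative). If the
// SME feature check fails, the name is still recorded with an invalid
// encoding so a diagnostic can be produced later during matching.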
3693
3694ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3695 const AsmToken &Tok = getTok();
3696 SMLoc S = getLoc();
3697
3698 StringRef Name = Tok.getString();
3699
3700 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3701 Lex(); // eat "za[.(b|h|s|d)]"
3702 unsigned ElementWidth = 0;
3703 auto DotPosition = Name.find('.');
3704 if (DotPosition != StringRef::npos) {
3705 const auto &KindRes =
3706 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3707 if (!KindRes)
3708 return TokError(
3709 "Expected the register to be followed by element width suffix");
3710 ElementWidth = KindRes->second;
3711 }
3712 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3713 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3714 getContext()));
3715 if (getLexer().is(AsmToken::LBrac)) {
3716 // There's no comma after matrix operand, so we can parse the next operand
3717 // immediately.
3718 if (parseOperand(Operands, false, false))
3719 return ParseStatus::NoMatch;
3720 }
3721 return ParseStatus::Success;
3722 }
3723
3724 // Try to parse matrix register.
3725 MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3726 if (!Reg)
3727 return ParseStatus::NoMatch;
3728
3729 size_t DotPosition = Name.find('.');
3730 assert(DotPosition != StringRef::npos && "Unexpected register");
3731
3732 StringRef Head = Name.take_front(DotPosition);
3733 StringRef Tail = Name.drop_front(DotPosition);
3734 StringRef RowOrColumn = Head.take_back();
3735
3736 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3737 .Case("h", MatrixKind::Row)
3738 .Case("v", MatrixKind::Col)
3739 .Default(MatrixKind::Tile);
3740
3741 // Next up, parsing the suffix
3742 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3743 if (!KindRes)
3744 return TokError(
3745 "Expected the register to be followed by element width suffix");
3746 unsigned ElementWidth = KindRes->second;
3747
3748 Lex();
3749
3750 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3751 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3752
3753 if (getLexer().is(AsmToken::LBrac)) {
3754 // There's no comma after matrix operand, so we can parse the next operand
3755 // immediately.
3756 if (parseOperand(Operands, false, false))
3757 return ParseStatus::NoMatch;
3758 }
3759 return ParseStatus::Success;
3760}
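// Illustrative spellings for tryParseMatrixRegister above: "za" or "za.d"
// (the whole ZA array), "za0.b" (a tile), and "za1h.s" / "za2v.s" (a tile
// row or column, selected by the 'h'/'v' that precedes the element-size
// suffix).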
3761
3762 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
3763 /// extend argument. Parse them if present.
3764ParseStatus
3765AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3766 const AsmToken &Tok = getTok();
3767 std::string LowerID = Tok.getString().lower();
3768 AArch64_AM::ShiftExtendType ShOp =
3769 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3770 .Case("lsl", AArch64_AM::LSL)
3771 .Case("lsr", AArch64_AM::LSR)
3772 .Case("asr", AArch64_AM::ASR)
3773 .Case("ror", AArch64_AM::ROR)
3774 .Case("msl", AArch64_AM::MSL)
3775 .Case("uxtb", AArch64_AM::UXTB)
3776 .Case("uxth", AArch64_AM::UXTH)
3777 .Case("uxtw", AArch64_AM::UXTW)
3778 .Case("uxtx", AArch64_AM::UXTX)
3779 .Case("sxtb", AArch64_AM::SXTB)
3780 .Case("sxth", AArch64_AM::SXTH)
3781 .Case("sxtw", AArch64_AM::SXTW)
3782 .Case("sxtx", AArch64_AM::SXTX)
3783 .Default(AArch64_AM::InvalidShiftExtend);
3784
3785 if (ShOp == AArch64_AM::InvalidShiftExtend)
3786 return ParseStatus::NoMatch;
3787
3788 SMLoc S = Tok.getLoc();
3789 Lex();
3790
3791 bool Hash = parseOptionalToken(AsmToken::Hash);
3792
3793 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3794 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3795 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3796 ShOp == AArch64_AM::MSL) {
3797 // We expect a number here.
3798 return TokError("expected #imm after shift specifier");
3799 }
3800
3801 // "extend" type operations don't need an immediate, #0 is implicit.
3802 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3803 Operands.push_back(
3804 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3805 return ParseStatus::Success;
3806 }
3807
3808 // Make sure we do actually have a number, identifier or a parenthesized
3809 // expression.
3810 SMLoc E = getLoc();
3811 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3812 !getTok().is(AsmToken::Identifier))
3813 return Error(E, "expected integer shift amount");
3814
3815 const MCExpr *ImmVal;
3816 if (getParser().parseExpression(ImmVal))
3817 return ParseStatus::Failure;
3818
3819 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3820 if (!MCE)
3821 return Error(E, "expected constant '#imm' after shift specifier");
3822
3823 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3824 Operands.push_back(AArch64Operand::CreateShiftExtend(
3825 ShOp, MCE->getValue(), true, S, E, getContext()));
3826 return ParseStatus::Success;
3827}
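// Examples of operands accepted by tryParseOptionalShiftExtend above
// (illustrative): "lsl #3" in "add x0, x1, x2, lsl #3", "uxtw #2" in
// "ldr w0, [x1, w2, uxtw #2]", and a bare extend such as "sxtw", where the
// amount is the implicit #0.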
3828
3829static const struct Extension {
3830 const char *Name;
3831 const FeatureBitset Features;
3832} ExtensionMap[] = {
3833 {"crc", {AArch64::FeatureCRC}},
3834 {"sm4", {AArch64::FeatureSM4}},
3835 {"sha3", {AArch64::FeatureSHA3}},
3836 {"sha2", {AArch64::FeatureSHA2}},
3837 {"aes", {AArch64::FeatureAES}},
3838 {"crypto", {AArch64::FeatureCrypto}},
3839 {"fp", {AArch64::FeatureFPARMv8}},
3840 {"simd", {AArch64::FeatureNEON}},
3841 {"ras", {AArch64::FeatureRAS}},
3842 {"rasv2", {AArch64::FeatureRASv2}},
3843 {"lse", {AArch64::FeatureLSE}},
3844 {"predres", {AArch64::FeaturePredRes}},
3845 {"predres2", {AArch64::FeatureSPECRES2}},
3846 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3847 {"mte", {AArch64::FeatureMTE}},
3848 {"memtag", {AArch64::FeatureMTE}},
3849 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3850 {"pan", {AArch64::FeaturePAN}},
3851 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3852 {"ccpp", {AArch64::FeatureCCPP}},
3853 {"rcpc", {AArch64::FeatureRCPC}},
3854 {"rng", {AArch64::FeatureRandGen}},
3855 {"sve", {AArch64::FeatureSVE}},
3856 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3857 {"sve2", {AArch64::FeatureSVE2}},
3858 {"sve-aes", {AArch64::FeatureSVEAES}},
3859 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3860 {"sve-sm4", {AArch64::FeatureSVESM4}},
3861 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3862 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3863 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3864 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3865 {"sve2-bitperm",
3866 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3867 AArch64::FeatureSVE2}},
3868 {"sve2p1", {AArch64::FeatureSVE2p1}},
3869 {"ls64", {AArch64::FeatureLS64}},
3870 {"xs", {AArch64::FeatureXS}},
3871 {"pauth", {AArch64::FeaturePAuth}},
3872 {"flagm", {AArch64::FeatureFlagM}},
3873 {"rme", {AArch64::FeatureRME}},
3874 {"sme", {AArch64::FeatureSME}},
3875 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3876 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3877 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3878 {"sme2", {AArch64::FeatureSME2}},
3879 {"sme2p1", {AArch64::FeatureSME2p1}},
3880 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3881 {"hbc", {AArch64::FeatureHBC}},
3882 {"mops", {AArch64::FeatureMOPS}},
3883 {"mec", {AArch64::FeatureMEC}},
3884 {"the", {AArch64::FeatureTHE}},
3885 {"d128", {AArch64::FeatureD128}},
3886 {"lse128", {AArch64::FeatureLSE128}},
3887 {"ite", {AArch64::FeatureITE}},
3888 {"cssc", {AArch64::FeatureCSSC}},
3889 {"rcpc3", {AArch64::FeatureRCPC3}},
3890 {"gcs", {AArch64::FeatureGCS}},
3891 {"bf16", {AArch64::FeatureBF16}},
3892 {"compnum", {AArch64::FeatureComplxNum}},
3893 {"dotprod", {AArch64::FeatureDotProd}},
3894 {"f32mm", {AArch64::FeatureMatMulFP32}},
3895 {"f64mm", {AArch64::FeatureMatMulFP64}},
3896 {"fp16", {AArch64::FeatureFullFP16}},
3897 {"fp16fml", {AArch64::FeatureFP16FML}},
3898 {"i8mm", {AArch64::FeatureMatMulInt8}},
3899 {"lor", {AArch64::FeatureLOR}},
3900 {"profile", {AArch64::FeatureSPE}},
3901 // "rdma" is the name documented by binutils for the feature, but
3902 // binutils also accepts incomplete prefixes of features, so "rdm"
3903 // works too. Support both spellings here.
3904 {"rdm", {AArch64::FeatureRDM}},
3905 {"rdma", {AArch64::FeatureRDM}},
3906 {"sb", {AArch64::FeatureSB}},
3907 {"ssbs", {AArch64::FeatureSSBS}},
3908 {"fp8", {AArch64::FeatureFP8}},
3909 {"faminmax", {AArch64::FeatureFAMINMAX}},
3910 {"fp8fma", {AArch64::FeatureFP8FMA}},
3911 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3912 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3913 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3914 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3915 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3916 {"lut", {AArch64::FeatureLUT}},
3917 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3918 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3919 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3920 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3921 {"cpa", {AArch64::FeatureCPA}},
3922 {"tlbiw", {AArch64::FeatureTLBIW}},
3923 {"pops", {AArch64::FeaturePoPS}},
3924 {"cmpbr", {AArch64::FeatureCMPBR}},
3925 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3926 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3927 {"fprcvt", {AArch64::FeatureFPRCVT}},
3928 {"lsfe", {AArch64::FeatureLSFE}},
3929 {"sme2p2", {AArch64::FeatureSME2p2}},
3930 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3931 {"sve2p2", {AArch64::FeatureSVE2p2}},
3932 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3933 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3934 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3935 {"lsui", {AArch64::FeatureLSUI}},
3936 {"occmo", {AArch64::FeatureOCCMO}},
3937 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3938 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3939 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3940 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3941 {"cmh", {AArch64::FeatureCMH}},
3942 {"lscp", {AArch64::FeatureLSCP}},
3943 {"tlbid", {AArch64::FeatureTLBID}},
3944 {"mpamv2", {AArch64::FeatureMPAMv2}},
3945 {"mtetc", {AArch64::FeatureMTETC}},
3946 {"gcie", {AArch64::FeatureGCIE}},
3947 {"sme2p3", {AArch64::FeatureSME2p3}},
3948 {"sve2p3", {AArch64::FeatureSVE2p3}},
3949 {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3950 {"f16mm", {AArch64::FeatureF16MM}},
3951 {"f16f32dot", {AArch64::FeatureF16F32DOT}},
3952 {"f16f32mm", {AArch64::FeatureF16F32MM}},
3953 {"mops-go", {AArch64::FeatureMOPS_GO}},
3954 {"poe2", {AArch64::FeatureS1POE2}},
3955 {"tev", {AArch64::FeatureTEV}},
3956 {"btie", {AArch64::FeatureBTIE}},
3957 {"dit", {AArch64::FeatureDIT}},
3958 {"brbe", {AArch64::FeatureBRBE}},
3959 {"bti", {AArch64::FeatureBranchTargetId}},
3960 {"fcma", {AArch64::FeatureComplxNum}},
3961 {"jscvt", {AArch64::FeatureJS}},
3962 {"pauth-lr", {AArch64::FeaturePAuthLR}},
3963 {"ssve-fexpa", {AArch64::FeatureSSVE_FEXPA}},
3964 {"wfxt", {AArch64::FeatureWFxT}},
3965};
3966
3967static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3968 if (FBS[AArch64::HasV8_0aOps])
3969 Str += "ARMv8a";
3970 if (FBS[AArch64::HasV8_1aOps])
3971 Str += "ARMv8.1a";
3972 else if (FBS[AArch64::HasV8_2aOps])
3973 Str += "ARMv8.2a";
3974 else if (FBS[AArch64::HasV8_3aOps])
3975 Str += "ARMv8.3a";
3976 else if (FBS[AArch64::HasV8_4aOps])
3977 Str += "ARMv8.4a";
3978 else if (FBS[AArch64::HasV8_5aOps])
3979 Str += "ARMv8.5a";
3980 else if (FBS[AArch64::HasV8_6aOps])
3981 Str += "ARMv8.6a";
3982 else if (FBS[AArch64::HasV8_7aOps])
3983 Str += "ARMv8.7a";
3984 else if (FBS[AArch64::HasV8_8aOps])
3985 Str += "ARMv8.8a";
3986 else if (FBS[AArch64::HasV8_9aOps])
3987 Str += "ARMv8.9a";
3988 else if (FBS[AArch64::HasV9_0aOps])
3989 Str += "ARMv9-a";
3990 else if (FBS[AArch64::HasV9_1aOps])
3991 Str += "ARMv9.1a";
3992 else if (FBS[AArch64::HasV9_2aOps])
3993 Str += "ARMv9.2a";
3994 else if (FBS[AArch64::HasV9_3aOps])
3995 Str += "ARMv9.3a";
3996 else if (FBS[AArch64::HasV9_4aOps])
3997 Str += "ARMv9.4a";
3998 else if (FBS[AArch64::HasV9_5aOps])
3999 Str += "ARMv9.5a";
4000 else if (FBS[AArch64::HasV9_6aOps])
4001 Str += "ARMv9.6a";
4002 else if (FBS[AArch64::HasV9_7aOps])
4003 Str += "ARMv9.7a";
4004 else if (FBS[AArch64::HasV8_0rOps])
4005 Str += "ARMv8r";
4006 else {
4007 SmallVector<std::string, 2> ExtMatches;
4008 for (const auto& Ext : ExtensionMap) {
4009 // Use & in case multiple features are enabled
4010 if ((FBS & Ext.Features) != FeatureBitset())
4011 ExtMatches.push_back(Ext.Name);
4012 }
4013 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
4014 }
4015}
4016
4017void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4018 SMLoc S) {
4019 const uint16_t Op2 = Encoding & 7;
4020 const uint16_t Cm = (Encoding & 0x78) >> 3;
4021 const uint16_t Cn = (Encoding & 0x780) >> 7;
4022 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4023
4024 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
4025
4026 Operands.push_back(
4027 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4028 Operands.push_back(
4029 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
4030 Operands.push_back(
4031 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
4032 Expr = MCConstantExpr::create(Op2, getContext());
4033 Operands.push_back(
4034 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4035}
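// The 14-bit Encoding used by createSysAlias packs op1 into bits [13:11],
// CRn into [10:7], CRm into [6:3] and op2 into [2:0]; for example,
// "ic ialluis" expands to "sys #0, c7, c1, #0" (illustrative).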
4036
4037 /// parseSysAlias - The IC, DC, AT, TLBI, MLBI, PLBI, GIC and GSB instructions
4038/// are simple aliases for the SYS instruction. Parse them specially so that
4039/// we create a SYS MCInst.
4040bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4041 OperandVector &Operands) {
4042 if (Name.contains('.'))
4043 return TokError("invalid operand");
4044
4045 Mnemonic = Name;
4046 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
4047
4048 const AsmToken &Tok = getTok();
4049 StringRef Op = Tok.getString();
4050 SMLoc S = Tok.getLoc();
4051 bool ExpectRegister = true;
4052 bool OptionalRegister = false;
4053 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
4054 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4055
4056 if (Mnemonic == "ic") {
4057 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
4058 if (!IC)
4059 return TokError("invalid operand for IC instruction");
4060 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
4061 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4062 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
4063 return TokError(Str);
4064 }
4065 ExpectRegister = IC->NeedsReg;
4066 createSysAlias(IC->Encoding, Operands, S);
4067 } else if (Mnemonic == "dc") {
4068 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
4069 if (!DC)
4070 return TokError("invalid operand for DC instruction");
4071 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
4072 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4073 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
4074 return TokError(Str);
4075 }
4076 createSysAlias(DC->Encoding, Operands, S);
4077 } else if (Mnemonic == "at") {
4078 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
4079 if (!AT)
4080 return TokError("invalid operand for AT instruction");
4081 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
4082 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4083 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
4084 return TokError(Str);
4085 }
4086 createSysAlias(AT->Encoding, Operands, S);
4087 } else if (Mnemonic == "tlbi") {
4088 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
4089 if (!TLBI)
4090 return TokError("invalid operand for TLBI instruction");
4091 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
4092 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4093 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
4094 return TokError(Str);
4095 }
4096 ExpectRegister = TLBI->NeedsReg;
4097 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4098 if (hasAll || hasTLBID) {
4099 OptionalRegister = TLBI->OptionalReg;
4100 }
4101 createSysAlias(TLBI->Encoding, Operands, S);
4102 } else if (Mnemonic == "mlbi") {
4103 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
4104 if (!MLBI)
4105 return TokError("invalid operand for MLBI instruction");
4106 else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
4107 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4108 setRequiredFeatureString(MLBI->getRequiredFeatures(), Str);
4109 return TokError(Str);
4110 }
4111 ExpectRegister = MLBI->NeedsReg;
4112 createSysAlias(MLBI->Encoding, Operands, S);
4113 } else if (Mnemonic == "gic") {
4114 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
4115 if (!GIC)
4116 return TokError("invalid operand for GIC instruction");
4117 else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
4118 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4119 setRequiredFeatureString(GIC->getRequiredFeatures(), Str);
4120 return TokError(Str);
4121 }
4122 ExpectRegister = GIC->NeedsReg;
4123 createSysAlias(GIC->Encoding, Operands, S);
4124 } else if (Mnemonic == "gsb") {
4125 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
4126 if (!GSB)
4127 return TokError("invalid operand for GSB instruction");
4128 else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
4129 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4130 setRequiredFeatureString(GSB->getRequiredFeatures(), Str);
4131 return TokError(Str);
4132 }
4133 ExpectRegister = false;
4134 createSysAlias(GSB->Encoding, Operands, S);
4135 } else if (Mnemonic == "plbi") {
4136 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
4137 if (!PLBI)
4138 return TokError("invalid operand for PLBI instruction");
4139 else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
4140 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4141 setRequiredFeatureString(PLBI->getRequiredFeatures(), Str);
4142 return TokError(Str);
4143 }
4144 ExpectRegister = PLBI->NeedsReg;
4145 if (hasAll || hasTLBID) {
4146 OptionalRegister = PLBI->OptionalReg;
4147 }
4148 createSysAlias(PLBI->Encoding, Operands, S);
4149 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4150 Mnemonic == "cosp") {
4151
4152 if (Op.lower() != "rctx")
4153 return TokError("invalid operand for prediction restriction instruction");
4154
4155 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
4156 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
4157
4158 if (Mnemonic == "cosp" && !hasSpecres2)
4159 return TokError("COSP requires: predres2");
4160 if (!hasPredres)
4161 return TokError(Mnemonic.upper() + "RCTX requires: predres");
4162
4163 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4164 : Mnemonic == "dvp" ? 0b101
4165 : Mnemonic == "cosp" ? 0b110
4166 : Mnemonic == "cpp" ? 0b111
4167 : 0;
4168 assert(PRCTX_Op2 &&
4169 "Invalid mnemonic for prediction restriction instruction");
4170 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4171 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4172
4173 createSysAlias(Encoding, Operands, S);
4174 }
4175
4176 Lex(); // Eat operand.
4177
4178 bool HasRegister = false;
4179
4180 // Check for the optional register operand.
4181 if (parseOptionalToken(AsmToken::Comma)) {
4182 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
4183 return TokError("expected register operand");
4184 HasRegister = true;
4185 }
4186
4187 if (!OptionalRegister) {
4188 if (ExpectRegister && !HasRegister)
4189 return TokError("specified " + Mnemonic + " op requires a register");
4190 else if (!ExpectRegister && HasRegister)
4191 return TokError("specified " + Mnemonic + " op does not use a register");
4192 }
4193
4194 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4195 return true;
4196
4197 return false;
4198}
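// Illustrative forms handled by parseSysAlias above: "ic iallu" and
// "tlbi vmalle1" take no register, "dc zva, x0" and "ic ivau, x0" require
// one, and with FeatureAll or FeatureTLBID some TLBI/PLBI operations make the
// register optional. The prediction-restriction aliases take the fixed
// "rctx" operand, e.g. "cfp rctx, x0".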
4199
4200/// parseSyslAlias - The GICR instructions are simple aliases for
4201/// the SYSL instruction. Parse them specially so that we create a
4202 /// SYSL MCInst.
4203bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4204 OperandVector &Operands) {
4205
4206 Mnemonic = Name;
4207 Operands.push_back(
4208 AArch64Operand::CreateToken("sysl", NameLoc, getContext()));
4209
4210 // Now expect two operands (register, then the operation identifier).
4211 SMLoc startLoc = getLoc();
4212 const AsmToken &regTok = getTok();
4213 StringRef reg = regTok.getString();
4214 MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
4215 if (!Reg)
4216 return TokError("expected register operand");
4217
4218 Operands.push_back(AArch64Operand::CreateReg(
4219 Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
4220
4221 Lex(); // Eat token
4222 if (parseToken(AsmToken::Comma))
4223 return true;
4224
4225 // Check for identifier
4226 const AsmToken &operandTok = getTok();
4227 StringRef Op = operandTok.getString();
4228 SMLoc S2 = operandTok.getLoc();
4229 Lex(); // Eat token
4230
4231 if (Mnemonic == "gicr") {
4232 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
4233 if (!GICR)
4234 return Error(S2, "invalid operand for GICR instruction");
4235 else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
4236 std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
4237 setRequiredFeatureString(GICR->getRequiredFeatures(), Str);
4238 return Error(S2, Str);
4239 }
4240 createSysAlias(GICR->Encoding, Operands, S2);
4241 }
4242
4243 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4244 return true;
4245
4246 return false;
4247}
4248
4249/// parseSyspAlias - The TLBIP instructions are simple aliases for
4250/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4251bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4252 OperandVector &Operands) {
4253 if (Name.contains('.'))
4254 return TokError("invalid operand");
4255
4256 Mnemonic = Name;
4257 Operands.push_back(
4258 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4259
4260 const AsmToken &Tok = getTok();
4261 StringRef Op = Tok.getString();
4262 SMLoc S = Tok.getLoc();
4263
4264 if (Mnemonic == "tlbip") {
4265 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4266 if (HasnXSQualifier) {
4267 Op = Op.drop_back(3);
4268 }
4269 const AArch64TLBIP::TLBIP *TLBIPorig = AArch64TLBIP::lookupTLBIPByName(Op);
4270 if (!TLBIPorig)
4271 return TokError("invalid operand for TLBIP instruction");
4272 const AArch64TLBIP::TLBIP TLBIP(
4273 TLBIPorig->Name, TLBIPorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4274 TLBIPorig->NeedsReg, TLBIPorig->OptionalReg,
4275 HasnXSQualifier
4276 ? TLBIPorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4277 : TLBIPorig->FeaturesRequired);
4278 if (!TLBIP.haveFeatures(getSTI().getFeatureBits())) {
4279 std::string Name =
4280 std::string(TLBIP.Name) + (HasnXSQualifier ? "nXS" : "");
4281 std::string Str("TLBIP " + Name + " requires: ");
4282 setRequiredFeatureString(TLBIP.getRequiredFeatures(), Str);
4283 return TokError(Str);
4284 }
4285 createSysAlias(TLBIP.Encoding, Operands, S);
4286 }
4287
4288 Lex(); // Eat operand.
4289
4290 if (parseComma())
4291 return true;
4292
4293 if (Tok.isNot(AsmToken::Identifier))
4294 return TokError("expected register identifier");
4295 auto Result = tryParseSyspXzrPair(Operands);
4296 if (Result.isNoMatch())
4297 Result = tryParseGPRSeqPair(Operands);
4298 if (!Result.isSuccess())
4299 return TokError("specified " + Mnemonic +
4300 " op requires a pair of registers");
4301
4302 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4303 return true;
4304
4305 return false;
4306}
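// Illustrative form for parseSyspAlias above: "tlbip vae1is, x0, x1" becomes
// a SYSP instruction taking an XZR pair or a sequential GPR pair; an "nXS"
// suffix on the operation name sets bit 7 of the encoding and additionally
// requires FeatureXS.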
4307
4308ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4309 MCAsmParser &Parser = getParser();
4310 const AsmToken &Tok = getTok();
4311
4312 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4313 return TokError("'csync' operand expected");
4314 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4315 // Immediate operand.
4316 const MCExpr *ImmVal;
4317 SMLoc ExprLoc = getLoc();
4318 AsmToken IntTok = Tok;
4319 if (getParser().parseExpression(ImmVal))
4320 return ParseStatus::Failure;
4321 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4322 if (!MCE)
4323 return Error(ExprLoc, "immediate value expected for barrier operand");
4324 int64_t Value = MCE->getValue();
4325 if (Mnemonic == "dsb" && Value > 15) {
4326 // This case is a no match here, but it might be matched by the nXS
4327 // variant. We deliberately do not unlex the optional '#', since it is not
4328 // needed to identify an integer immediate.
4329 Parser.getLexer().UnLex(IntTok);
4330 return ParseStatus::NoMatch;
4331 }
4332 if (Value < 0 || Value > 15)
4333 return Error(ExprLoc, "barrier operand out of range");
4334 auto DB = AArch64DB::lookupDBByEncoding(Value);
4335 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4336 ExprLoc, getContext(),
4337 false /*hasnXSModifier*/));
4338 return ParseStatus::Success;
4339 }
4340
4341 if (Tok.isNot(AsmToken::Identifier))
4342 return TokError("invalid operand for instruction");
4343
4344 StringRef Operand = Tok.getString();
4345 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4346 auto DB = AArch64DB::lookupDBByName(Operand);
4347 // The only valid named option for ISB is 'sy'
4348 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4349 return TokError("'sy' or #imm operand expected");
4350 // The only valid named option for TSB is 'csync'
4351 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4352 return TokError("'csync' operand expected");
4353 if (!DB && !TSB) {
4354 if (Mnemonic == "dsb") {
4355 // This case is a no match here, but it might be matched by the nXS
4356 // variant.
4357 return ParseStatus::NoMatch;
4358 }
4359 return TokError("invalid barrier option name");
4360 }
4361
4362 Operands.push_back(AArch64Operand::CreateBarrier(
4363 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4364 getContext(), false /*hasnXSModifier*/));
4365 Lex(); // Consume the option
4366
4367 return ParseStatus::Success;
4368}
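// Typical barrier operands for tryParseBarrierOperand above (illustrative):
// "dsb sy", "dmb ishld", "dsb #4", "isb sy" and "tsb csync". A "dsb"
// immediate above 15 or an unrecognised "dsb" option name returns NoMatch so
// that the nXS variant parser below can try it instead.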
4369
4370ParseStatus
4371AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4372 const AsmToken &Tok = getTok();
4373
4374 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4375 if (Mnemonic != "dsb")
4376 return ParseStatus::Failure;
4377
4378 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4379 // Immediate operand.
4380 const MCExpr *ImmVal;
4381 SMLoc ExprLoc = getLoc();
4382 if (getParser().parseExpression(ImmVal))
4383 return ParseStatus::Failure;
4384 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4385 if (!MCE)
4386 return Error(ExprLoc, "immediate value expected for barrier operand");
4387 int64_t Value = MCE->getValue();
4388 // v8.7-A DSB in the nXS variant accepts only the following immediate
4389 // values: 16, 20, 24, 28.
4390 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4391 return Error(ExprLoc, "barrier operand out of range");
4392 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4393 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4394 ExprLoc, getContext(),
4395 true /*hasnXSModifier*/));
4396 return ParseStatus::Success;
4397 }
4398
4399 if (Tok.isNot(AsmToken::Identifier))
4400 return TokError("invalid operand for instruction");
4401
4402 StringRef Operand = Tok.getString();
4403 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4404
4405 if (!DB)
4406 return TokError("invalid barrier option name");
4407
4408 Operands.push_back(
4409 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4410 getContext(), true /*hasnXSModifier*/));
4411 Lex(); // Consume the option
4412
4413 return ParseStatus::Success;
4414}
4415
4416ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4417 const AsmToken &Tok = getTok();
4418
4419 if (Tok.isNot(AsmToken::Identifier))
4420 return ParseStatus::NoMatch;
4421
4422 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4423 return ParseStatus::NoMatch;
4424
4425 int MRSReg, MSRReg;
4426 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4427 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4428 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4429 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4430 } else
4431 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4432
4433 unsigned PStateImm = -1;
4434 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4435 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4436 PStateImm = PState15->Encoding;
4437 if (!PState15) {
4438 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4439 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4440 PStateImm = PState1->Encoding;
4441 }
4442
4443 Operands.push_back(
4444 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4445 PStateImm, getContext()));
4446 Lex(); // Eat identifier
4447
4448 return ParseStatus::Success;
4449}
4450
4451ParseStatus
4452AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4453 SMLoc S = getLoc();
4454 const AsmToken &Tok = getTok();
4455 if (Tok.isNot(AsmToken::Identifier))
4456 return TokError("invalid operand for instruction");
4457
4459 if (!PH)
4460 return TokError("invalid operand for instruction");
4461
4462 Operands.push_back(AArch64Operand::CreatePHintInst(
4463 PH->Encoding, Tok.getString(), S, getContext()));
4464 Lex(); // Eat identifier token.
4465 return ParseStatus::Success;
4466}
4467
4468/// tryParseNeonVectorRegister - Parse a vector register operand.
4469bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4470 if (getTok().isNot(AsmToken::Identifier))
4471 return true;
4472
4473 SMLoc S = getLoc();
4474 // Check for a vector register specifier first.
4475 StringRef Kind;
4476 MCRegister Reg;
4477 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4478 if (!Res.isSuccess())
4479 return true;
4480
4481 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4482 if (!KindRes)
4483 return true;
4484
4485 unsigned ElementWidth = KindRes->second;
4486 Operands.push_back(
4487 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4488 S, getLoc(), getContext()));
4489
4490 // If there was an explicit qualifier, that goes on as a literal text
4491 // operand.
4492 if (!Kind.empty())
4493 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4494
4495 return tryParseVectorIndex(Operands).isFailure();
4496}
4497
4498ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4499 SMLoc SIdx = getLoc();
4500 if (parseOptionalToken(AsmToken::LBrac)) {
4501 const MCExpr *ImmVal;
4502 if (getParser().parseExpression(ImmVal))
4503 return ParseStatus::NoMatch;
4504 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4505 if (!MCE)
4506 return TokError("immediate value expected for vector index");
4507
4508 SMLoc E = getLoc();
4509
4510 if (parseToken(AsmToken::RBrac, "']' expected"))
4511 return ParseStatus::Failure;
4512
4513 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4514 E, getContext()));
4515 return ParseStatus::Success;
4516 }
4517
4518 return ParseStatus::NoMatch;
4519}
4520
4521// tryParseVectorRegister - Try to parse a vector register name with
4522// optional kind specifier. If it is a register specifier, eat the token
4523// and return it.
4524ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4525 StringRef &Kind,
4526 RegKind MatchKind) {
4527 const AsmToken &Tok = getTok();
4528
4529 if (Tok.isNot(AsmToken::Identifier))
4530 return ParseStatus::NoMatch;
4531
4532 StringRef Name = Tok.getString();
4533 // If there is a kind specifier, it's separated from the register name by
4534 // a '.'.
4535 size_t Start = 0, Next = Name.find('.');
4536 StringRef Head = Name.slice(Start, Next);
4537 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4538
4539 if (RegNum) {
4540 if (Next != StringRef::npos) {
4541 Kind = Name.substr(Next);
4542 if (!isValidVectorKind(Kind, MatchKind))
4543 return TokError("invalid vector kind qualifier");
4544 }
4545 Lex(); // Eat the register token.
4546
4547 Reg = RegNum;
4548 return ParseStatus::Success;
4549 }
4550
4551 return ParseStatus::NoMatch;
4552}
4553
4554ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4555 OperandVector &Operands) {
4556 ParseStatus Status =
4557 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4558 if (!Status.isSuccess())
4559 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4560 return Status;
4561}
4562
4563/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4564template <RegKind RK>
4565ParseStatus
4566AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4567 // Check for a SVE predicate register specifier first.
4568 const SMLoc S = getLoc();
4569 StringRef Kind;
4570 MCRegister RegNum;
4571 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4572 if (!Res.isSuccess())
4573 return Res;
4574
4575 const auto &KindRes = parseVectorKind(Kind, RK);
4576 if (!KindRes)
4577 return ParseStatus::NoMatch;
4578
4579 unsigned ElementWidth = KindRes->second;
4580 Operands.push_back(AArch64Operand::CreateVectorReg(
4581 RegNum, RK, ElementWidth, S,
4582 getLoc(), getContext()));
4583
4584 if (getLexer().is(AsmToken::LBrac)) {
4585 if (RK == RegKind::SVEPredicateAsCounter) {
4586 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4587 if (ResIndex.isSuccess())
4588 return ParseStatus::Success;
4589 } else {
4590 // Indexed predicate; there's no comma, so try to parse the next operand
4591 // immediately.
4592 if (parseOperand(Operands, false, false))
4593 return ParseStatus::NoMatch;
4594 }
4595 }
4596
4597 // Not all predicates are followed by a '/m' or '/z'.
4598 if (getTok().isNot(AsmToken::Slash))
4599 return ParseStatus::Success;
4600
4601 // But when they do they shouldn't have an element type suffix.
4602 if (!Kind.empty())
4603 return Error(S, "not expecting size suffix");
4604
4605 // Add a literal slash as operand
4606 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4607
4608 Lex(); // Eat the slash.
4609
4610 // Zeroing or merging?
4611 auto Pred = getTok().getString().lower();
4612 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4613 return Error(getLoc(), "expecting 'z' predication");
4614
4615 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4616 return Error(getLoc(), "expecting 'm' or 'z' predication");
4617
4618 // Add zero/merge token.
4619 const char *ZM = Pred == "z" ? "z" : "m";
4620 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4621
4622 Lex(); // Eat zero/merge token.
4623 return ParseStatus::Success;
4624}
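// Illustrative predicate operands for tryParseSVEPredicateVector above:
// "p0.b" (sized), "p0/z" and "p0/m" (zeroing/merging qualifiers, which must
// not carry a size suffix), and the predicate-as-counter form "pn8", which
// may take a vector index and accepts only "/z" predication.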
4625
4626/// parseRegister - Parse a register operand.
4627bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4628 // Try for a Neon vector register.
4629 if (!tryParseNeonVectorRegister(Operands))
4630 return false;
4631
4632 if (tryParseZTOperand(Operands).isSuccess())
4633 return false;
4634
4635 // Otherwise try for a scalar register.
4636 if (tryParseGPROperand<false>(Operands).isSuccess())
4637 return false;
4638
4639 return true;
4640}
4641
4642bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4643 bool HasELFModifier = false;
4644 AArch64::Specifier RefKind;
4645 SMLoc Loc = getLexer().getLoc();
4646 if (parseOptionalToken(AsmToken::Colon)) {
4647 HasELFModifier = true;
4648
4649 if (getTok().isNot(AsmToken::Identifier))
4650 return TokError("expect relocation specifier in operand after ':'");
4651
4652 std::string LowerCase = getTok().getIdentifier().lower();
4653 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4654 .Case("lo12", AArch64::S_LO12)
4655 .Case("abs_g3", AArch64::S_ABS_G3)
4656 .Case("abs_g2", AArch64::S_ABS_G2)
4657 .Case("abs_g2_s", AArch64::S_ABS_G2_S)
4658 .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
4659 .Case("abs_g1", AArch64::S_ABS_G1)
4660 .Case("abs_g1_s", AArch64::S_ABS_G1_S)
4661 .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
4662 .Case("abs_g0", AArch64::S_ABS_G0)
4663 .Case("abs_g0_s", AArch64::S_ABS_G0_S)
4664 .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
4665 .Case("prel_g3", AArch64::S_PREL_G3)
4666 .Case("prel_g2", AArch64::S_PREL_G2)
4667 .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
4668 .Case("prel_g1", AArch64::S_PREL_G1)
4669 .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
4670 .Case("prel_g0", AArch64::S_PREL_G0)
4671 .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
4672 .Case("dtprel_g2", AArch64::S_DTPREL_G2)
4673 .Case("dtprel_g1", AArch64::S_DTPREL_G1)
4674 .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
4675 .Case("dtprel_g0", AArch64::S_DTPREL_G0)
4676 .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
4677 .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
4678 .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
4679 .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
4680 .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
4681 .Case("tprel_g2", AArch64::S_TPREL_G2)
4682 .Case("tprel_g1", AArch64::S_TPREL_G1)
4683 .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
4684 .Case("tprel_g0", AArch64::S_TPREL_G0)
4685 .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
4686 .Case("tprel_hi12", AArch64::S_TPREL_HI12)
4687 .Case("tprel_lo12", AArch64::S_TPREL_LO12)
4688 .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
4689 .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
4690 .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
4691 .Case("got", AArch64::S_GOT_PAGE)
4692 .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
4693 .Case("got_lo12", AArch64::S_GOT_LO12)
4694 .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
4695 .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
4696 .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
4697 .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
4698 .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
4699 .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
4700 .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
4701 .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
4702 .Case("secrel_lo12", AArch64::S_SECREL_LO12)
4703 .Case("secrel_hi12", AArch64::S_SECREL_HI12)
4704 .Default(AArch64::S_INVALID);
4705
4706 if (RefKind == AArch64::S_INVALID)
4707 return TokError("expect relocation specifier in operand after ':'");
4708
4709 Lex(); // Eat identifier
4710
4711 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4712 return true;
4713 }
4714
4715 if (getParser().parseExpression(ImmVal))
4716 return true;
4717
4718 if (HasELFModifier)
4719 ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);
4720
4721 SMLoc EndLoc;
4722 if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4723 if (getParser().parseAtSpecifier(ImmVal, EndLoc))
4724 return true;
4725 const MCExpr *Term;
4726 MCBinaryExpr::Opcode Opcode;
4727 if (parseOptionalToken(AsmToken::Plus))
4728 Opcode = MCBinaryExpr::Add;
4729 else if (parseOptionalToken(AsmToken::Minus))
4730 Opcode = MCBinaryExpr::Sub;
4731 else
4732 return false;
4733 if (getParser().parsePrimaryExpr(Term, EndLoc))
4734 return true;
4735 ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
4736 }
4737
4738 return false;
4739}
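// Examples of symbolic immediates handled by parseSymbolicImmVal above
// (illustrative): ":lo12:var" in "add x0, x0, :lo12:var", ":abs_g1:var" in a
// MOVZ/MOVK sequence, and Mach-O style "var@PAGEOFF" handled via the
// @-specifier path; a trailing "+ const" or "- const" addend is folded into
// the expression.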
4740
4741ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4742 if (getTok().isNot(AsmToken::LCurly))
4743 return ParseStatus::NoMatch;
4744
4745 auto ParseMatrixTile = [this](unsigned &Reg,
4746 unsigned &ElementWidth) -> ParseStatus {
4747 StringRef Name = getTok().getString();
4748 size_t DotPosition = Name.find('.');
4749 if (DotPosition == StringRef::npos)
4750 return ParseStatus::NoMatch;
4751
4752 unsigned RegNum = matchMatrixTileListRegName(Name);
4753 if (!RegNum)
4754 return ParseStatus::NoMatch;
4755
4756 StringRef Tail = Name.drop_front(DotPosition);
4757 const std::optional<std::pair<int, int>> &KindRes =
4758 parseVectorKind(Tail, RegKind::Matrix);
4759 if (!KindRes)
4760 return TokError(
4761 "Expected the register to be followed by element width suffix");
4762 ElementWidth = KindRes->second;
4763 Reg = RegNum;
4764 Lex(); // Eat the register.
4765 return ParseStatus::Success;
4766 };
4767
4768 SMLoc S = getLoc();
4769 auto LCurly = getTok();
4770 Lex(); // Eat left bracket token.
4771
4772 // Empty matrix list
4773 if (parseOptionalToken(AsmToken::RCurly)) {
4774 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4775 /*RegMask=*/0, S, getLoc(), getContext()));
4776 return ParseStatus::Success;
4777 }
4778
4779 // Try parse {za} alias early
4780 if (getTok().getString().equals_insensitive("za")) {
4781 Lex(); // Eat 'za'
4782
4783 if (parseToken(AsmToken::RCurly, "'}' expected"))
4784 return ParseStatus::Failure;
4785
4786 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4787 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4788 return ParseStatus::Success;
4789 }
4790
4791 SMLoc TileLoc = getLoc();
4792
4793 unsigned FirstReg, ElementWidth;
4794 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4795 if (!ParseRes.isSuccess()) {
4796 getLexer().UnLex(LCurly);
4797 return ParseRes;
4798 }
4799
4800 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4801
4802 unsigned PrevReg = FirstReg;
4803
4804 SmallSet<unsigned, 8> DRegs;
4805 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4806
4807 SmallSet<unsigned, 8> SeenRegs;
4808 SeenRegs.insert(FirstReg);
4809
4810 while (parseOptionalToken(AsmToken::Comma)) {
4811 TileLoc = getLoc();
4812 unsigned Reg, NextElementWidth;
4813 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4814 if (!ParseRes.isSuccess())
4815 return ParseRes;
4816
4817 // Element size must match on all regs in the list.
4818 if (ElementWidth != NextElementWidth)
4819 return Error(TileLoc, "mismatched register size suffix");
4820
4821 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4822 Warning(TileLoc, "tile list not in ascending order");
4823
4824 if (SeenRegs.contains(Reg))
4825 Warning(TileLoc, "duplicate tile in list");
4826 else {
4827 SeenRegs.insert(Reg);
4828 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4829 }
4830
4831 PrevReg = Reg;
4832 }
4833
4834 if (parseToken(AsmToken::RCurly, "'}' expected"))
4835 return ParseStatus::Failure;
4836
4837 unsigned RegMask = 0;
4838 for (auto Reg : DRegs)
4839 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4840 RI->getEncodingValue(AArch64::ZAD0));
4841 Operands.push_back(
4842 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4843
4844 return ParseStatus::Success;
4845}
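// Illustrative tile lists for tryParseMatrixTileList above: "{}" (empty
// mask), "{za}" (all eight 64-bit tiles, mask 0xFF), and "{za0.s, za1.s}",
// where each named tile is expanded to the ZAD sub-tiles it covers before
// the register mask is built.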
4846
4847template <RegKind VectorKind>
4848ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4849 bool ExpectMatch) {
4850 MCAsmParser &Parser = getParser();
4851 if (!getTok().is(AsmToken::LCurly))
4852 return ParseStatus::NoMatch;
4853
4854 // Wrapper around parse function
4855 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4856 bool NoMatchIsError) -> ParseStatus {
4857 auto RegTok = getTok();
4858 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4859 if (ParseRes.isSuccess()) {
4860 if (parseVectorKind(Kind, VectorKind))
4861 return ParseRes;
4862 llvm_unreachable("Expected a valid vector kind");
4863 }
4864
4865 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4866 RegTok.getString().equals_insensitive("zt0"))
4867 return ParseStatus::NoMatch;
4868
4869 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4870 (ParseRes.isNoMatch() && NoMatchIsError &&
4871 !RegTok.getString().starts_with_insensitive("za")))
4872 return Error(Loc, "vector register expected");
4873
4874 return ParseStatus::NoMatch;
4875 };
4876
4877 unsigned NumRegs = getNumRegsForRegKind(VectorKind);
4878 SMLoc S = getLoc();
4879 auto LCurly = getTok();
4880 Lex(); // Eat left bracket token.
4881
4882 StringRef Kind;
4883 MCRegister FirstReg;
4884 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4885
4886 // Put back the original left bracket if there was no match, so that
4887 // different types of list-operands can be matched (e.g. SVE, Neon).
4888 if (ParseRes.isNoMatch())
4889 Parser.getLexer().UnLex(LCurly);
4890
4891 if (!ParseRes.isSuccess())
4892 return ParseRes;
4893
4894 MCRegister PrevReg = FirstReg;
4895 unsigned Count = 1;
4896
4897 unsigned Stride = 1;
4898 if (parseOptionalToken(AsmToken::Minus)) {
4899 SMLoc Loc = getLoc();
4900 StringRef NextKind;
4901
4902 MCRegister Reg;
4903 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4904 if (!ParseRes.isSuccess())
4905 return ParseRes;
4906
4907 // Any kind suffixes must match on all regs in the list.
4908 if (Kind != NextKind)
4909 return Error(Loc, "mismatched register size suffix");
4910
4911 unsigned Space =
4912 (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
4913
4914 if (Space == 0 || Space > 3)
4915 return Error(Loc, "invalid number of vectors");
4916
4917 Count += Space;
4918 }
4919 else {
4920 bool HasCalculatedStride = false;
4921 while (parseOptionalToken(AsmToken::Comma)) {
4922 SMLoc Loc = getLoc();
4923 StringRef NextKind;
4924 MCRegister Reg;
4925 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4926 if (!ParseRes.isSuccess())
4927 return ParseRes;
4928
4929 // Any kind suffixes must match on all regs in the list.
4930 if (Kind != NextKind)
4931 return Error(Loc, "mismatched register size suffix");
4932
4933 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4934 unsigned PrevRegVal =
4935 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4936 if (!HasCalculatedStride) {
4937 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4938 : (NumRegs - (PrevRegVal - RegVal));
4939 HasCalculatedStride = true;
4940 }
4941
4942 // Registers must increase by the same stride (with a wraparound at the last register).
4943 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4944 return Error(Loc, "registers must have the same sequential stride");
4945
4946 PrevReg = Reg;
4947 ++Count;
4948 }
4949 }
4950
4951 if (parseToken(AsmToken::RCurly, "'}' expected"))
4952 return ParseStatus::Failure;
4953
4954 if (Count > 4)
4955 return Error(S, "invalid number of vectors");
4956
4957 unsigned NumElements = 0;
4958 unsigned ElementWidth = 0;
4959 if (!Kind.empty()) {
4960 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4961 std::tie(NumElements, ElementWidth) = *VK;
4962 }
4963
4964 Operands.push_back(AArch64Operand::CreateVectorList(
4965 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4966 getLoc(), getContext()));
4967
4968 if (getTok().is(AsmToken::LBrac)) {
4969 ParseStatus Res = tryParseVectorIndex(Operands);
4970 if (Res.isFailure())
4971 return ParseStatus::Failure;
4972 return ParseStatus::Success;
4973 }
4974
4975 return ParseStatus::Success;
4976}
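// Illustrative vector lists for tryParseVectorList above: the Neon forms
// "{v0.4s, v1.4s}" and "{v0.4s-v3.4s}", the SVE form "{z0.d, z1.d}", and
// SME2 strided lists such as "{z0.s, z8.s}", where every register must
// follow the same stride modulo the number of registers in the class.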
4977
4978/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4979bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4980 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4981 if (!ParseRes.isSuccess())
4982 return true;
4983
4984 return tryParseVectorIndex(Operands).isFailure();
4985}
4986
4987ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4988 SMLoc StartLoc = getLoc();
4989
4990 MCRegister RegNum;
4991 ParseStatus Res = tryParseScalarRegister(RegNum);
4992 if (!Res.isSuccess())
4993 return Res;
4994
4995 if (!parseOptionalToken(AsmToken::Comma)) {
4996 Operands.push_back(AArch64Operand::CreateReg(
4997 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4998 return ParseStatus::Success;
4999 }
5000
5001 parseOptionalToken(AsmToken::Hash);
5002
5003 if (getTok().isNot(AsmToken::Integer))
5004 return Error(getLoc(), "index must be absent or #0");
5005
5006 const MCExpr *ImmVal;
5007 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
5008 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
5009 return Error(getLoc(), "index must be absent or #0");
5010
5011 Operands.push_back(AArch64Operand::CreateReg(
5012 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
5013 return ParseStatus::Success;
5014}
5015
5016ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
5017 SMLoc StartLoc = getLoc();
5018 const AsmToken &Tok = getTok();
5019 std::string Name = Tok.getString().lower();
5020
5021 MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);
5022
5023 if (!Reg)
5024 return ParseStatus::NoMatch;
5025
5026 Operands.push_back(AArch64Operand::CreateReg(
5027 Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
5028 Lex(); // Eat register.
5029
5030 // Check if register is followed by an index
5031 if (parseOptionalToken(AsmToken::LBrac)) {
5032 Operands.push_back(
5033 AArch64Operand::CreateToken("[", getLoc(), getContext()));
5034 const MCExpr *ImmVal;
5035 if (getParser().parseExpression(ImmVal))
5036 return ParseStatus::NoMatch;
5037 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5038 if (!MCE)
5039 return TokError("immediate value expected for vector index");
5040 Operands.push_back(AArch64Operand::CreateImm(
5041 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
5042 getLoc(), getContext()));
5043 if (parseOptionalToken(AsmToken::Comma))
5044 if (parseOptionalMulOperand(Operands))
5045 return ParseStatus::Failure;
5046 if (parseToken(AsmToken::RBrac, "']' expected"))
5047 return ParseStatus::Failure;
5048 Operands.push_back(
5049 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5050 }
5051 return ParseStatus::Success;
5052}
5053
5054template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5055ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5056 SMLoc StartLoc = getLoc();
5057
5058 MCRegister RegNum;
5059 ParseStatus Res = tryParseScalarRegister(RegNum);
5060 if (!Res.isSuccess())
5061 return Res;
5062
5063 // No shift/extend is the default.
5064 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
5065 Operands.push_back(AArch64Operand::CreateReg(
5066 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
5067 return ParseStatus::Success;
5068 }
5069
5070 // Eat the comma
5071 Lex();
5072
5073 // Match the shift
5074 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5075 Res = tryParseOptionalShiftExtend(ExtOpnd);
5076 if (!Res.isSuccess())
5077 return Res;
5078
5079 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5080 Operands.push_back(AArch64Operand::CreateReg(
5081 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5082 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5083 Ext->hasShiftExtendAmount()));
5084
5085 return ParseStatus::Success;
5086}
5087
5088bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5089 MCAsmParser &Parser = getParser();
5090
5091 // Some SVE instructions have a decoration after the immediate, i.e.
5092 // "mul vl". We parse them here and add tokens, which must be present in the
5093 // asm string in the tablegen instruction.
5094 bool NextIsVL =
5095 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
5096 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
5097 if (!getTok().getString().equals_insensitive("mul") ||
5098 !(NextIsVL || NextIsHash))
5099 return true;
5100
5101 Operands.push_back(
5102 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
5103 Lex(); // Eat the "mul"
5104
5105 if (NextIsVL) {
5106 Operands.push_back(
5107 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
5108 Lex(); // Eat the "vl"
5109 return false;
5110 }
5111
5112 if (NextIsHash) {
5113 Lex(); // Eat the #
5114 SMLoc S = getLoc();
5115
5116 // Parse immediate operand.
5117 const MCExpr *ImmVal;
5118 if (!Parser.parseExpression(ImmVal))
5119 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
5120 Operands.push_back(AArch64Operand::CreateImm(
5121 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
5122 getContext()));
5123 return false;
5124 }
5125 }
5126
5127 return Error(getLoc(), "expected 'vl' or '#<imm>'");
5128}
5129
5130bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5131 StringRef &VecGroup) {
5132 MCAsmParser &Parser = getParser();
5133 auto Tok = Parser.getTok();
5134 if (Tok.isNot(AsmToken::Identifier))
5135 return true;
5136
5137 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5138 .Case("vgx2", "vgx2")
5139 .Case("vgx4", "vgx4")
5140 .Default("");
5141
5142 if (VG.empty())
5143 return true;
5144
5145 VecGroup = VG;
5146 Parser.Lex(); // Eat vgx[2|4]
5147 return false;
5148}
5149
5150bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5151 auto Tok = getTok();
5152 if (Tok.isNot(AsmToken::Identifier))
5153 return true;
5154
5155 auto Keyword = Tok.getString();
5156 Keyword = StringSwitch<StringRef>(Keyword.lower())
5157 .Case("sm", "sm")
5158 .Case("za", "za")
5159 .Default(Keyword);
5160 Operands.push_back(
5161 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5162
5163 Lex();
5164 return false;
5165}
5166
5167 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
5168 /// the operand regardless of the mnemonic.
5169bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
5170 bool invertCondCode) {
5171 MCAsmParser &Parser = getParser();
5172
5173 ParseStatus ResTy =
5174 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
5175
5176 // Check if the current operand has a custom associated parser. If so, try to
5177 // custom-parse the operand; otherwise fall back to the general approach.
5178 if (ResTy.isSuccess())
5179 return false;
5180 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5181 // there was a match but an error occurred, in which case just return that
5182 // the operand parsing failed.
5183 if (ResTy.isFailure())
5184 return true;
5185
5186 // Nothing custom, so do general case parsing.
5187 SMLoc S, E;
5188 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
5189 if (parseOptionalToken(AsmToken::Comma)) {
5190 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
5191 if (!Res.isNoMatch())
5192 return Res.isFailure();
5193 getLexer().UnLex(SavedTok);
5194 }
5195 return false;
5196 };
5197 switch (getLexer().getKind()) {
5198 default: {
5199 SMLoc S = getLoc();
5200 const MCExpr *Expr;
5201 if (parseSymbolicImmVal(Expr))
5202 return Error(S, "invalid operand");
5203
5204 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5205 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
5206 return parseOptionalShiftExtend(getTok());
5207 }
5208 case AsmToken::LBrac: {
5209 Operands.push_back(
5210 AArch64Operand::CreateToken("[", getLoc(), getContext()));
5211 Lex(); // Eat '['
5212
5213 // There's no comma after a '[', so we can parse the next operand
5214 // immediately.
5215 return parseOperand(Operands, false, false);
5216 }
5217 case AsmToken::LCurly: {
5218 if (!parseNeonVectorList(Operands))
5219 return false;
5220
5221 Operands.push_back(
5222 AArch64Operand::CreateToken("{", getLoc(), getContext()));
5223 Lex(); // Eat '{'
5224
5225 // There's no comma after a '{', so we can parse the next operand
5226 // immediately.
5227 return parseOperand(Operands, false, false);
5228 }
5229 case AsmToken::Identifier: {
5230 // See if this is a "VG" decoration used by SME instructions.
5231 StringRef VecGroup;
5232 if (!parseOptionalVGOperand(Operands, VecGroup)) {
5233 Operands.push_back(
5234 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
5235 return false;
5236 }
5237 // If we're expecting a Condition Code operand, then just parse that.
5238 if (isCondCode)
5239 return parseCondCode(Operands, invertCondCode);
5240
5241 // If it's a register name, parse it.
5242 if (!parseRegister(Operands)) {
5243 // Parse an optional shift/extend modifier.
5244 AsmToken SavedTok = getTok();
5245 if (parseOptionalToken(AsmToken::Comma)) {
5246 // The operand after the register may be a label (e.g. ADR/ADRP). Check
5247 // such cases and don't report an error when <label> happens to match a
5248 // shift/extend modifier.
5249 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
5250 /*ParseForAllFeatures=*/true);
5251 if (!Res.isNoMatch())
5252 return Res.isFailure();
5253 Res = tryParseOptionalShiftExtend(Operands);
5254 if (!Res.isNoMatch())
5255 return Res.isFailure();
5256 getLexer().UnLex(SavedTok);
5257 }
5258 return false;
5259 }
5260
5261 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5262 // by SVE instructions.
5263 if (!parseOptionalMulOperand(Operands))
5264 return false;
5265
5266 // If this is a two-word mnemonic, parse its special keyword
5267 // operand as an identifier.
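// e.g. (illustrative) "smstart sm", "smstart za" or "smstop sm".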
5268 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5269 Mnemonic == "gcsb")
5270 return parseKeywordOperand(Operands);
5271
5272 // This was not a register so parse other operands that start with an
5273 // identifier (like labels) as expressions and create them as immediates.
5274 const MCExpr *IdVal, *Term;
5275 S = getLoc();
5276 if (getParser().parseExpression(IdVal))
5277 return true;
5278 if (getParser().parseAtSpecifier(IdVal, E))
5279 return true;
5280 std::optional<MCBinaryExpr::Opcode> Opcode;
5281 if (parseOptionalToken(AsmToken::Plus))
5282 Opcode = MCBinaryExpr::Add;
5283 else if (parseOptionalToken(AsmToken::Minus))
5284 Opcode = MCBinaryExpr::Sub;
5285 if (Opcode) {
5286 if (getParser().parsePrimaryExpr(Term, E))
5287 return true;
5288 IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
5289 }
5290 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5291
5292 // Parse an optional shift/extend modifier.
5293 return parseOptionalShiftExtend(getTok());
5294 }
5295 case AsmToken::Integer:
5296 case AsmToken::Real:
5297 case AsmToken::Hash: {
5298 // #42 -> immediate.
5299 S = getLoc();
5300
5301 parseOptionalToken(AsmToken::Hash);
5302
5303 // Parse a negative sign
5304 bool isNegative = false;
5305 if (getTok().is(AsmToken::Minus)) {
5306 isNegative = true;
5307 // We need to consume this token only when we have a Real, otherwise
5308 // we let parseSymbolicImmVal take care of it
5309 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5310 Lex();
5311 }
5312
5313 // The only Real that should come through here is a literal #0.0 for
5314 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5315 // so convert the value.
5316 const AsmToken &Tok = getTok();
5317 if (Tok.is(AsmToken::Real)) {
5318 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5319 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5320 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5321 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5322 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5323 return TokError("unexpected floating point literal");
5324 else if (IntVal != 0 || isNegative)
5325 return TokError("expected floating-point constant #0.0");
5326 Lex(); // Eat the token.
5327
5328 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5329 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5330 return false;
5331 }
5332
5333 const MCExpr *ImmVal;
5334 if (parseSymbolicImmVal(ImmVal))
5335 return true;
5336
5337 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5338 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5339
5340 // Parse an optional shift/extend modifier.
5341 return parseOptionalShiftExtend(Tok);
5342 }
5343 case AsmToken::Equal: {
5344 SMLoc Loc = getLoc();
5345 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5346 return TokError("unexpected token in operand");
5347 Lex(); // Eat '='
5348 const MCExpr *SubExprVal;
5349 if (getParser().parseExpression(SubExprVal))
5350 return true;
5351
5352 if (Operands.size() < 2 ||
5353 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5354 return Error(Loc, "Only valid when first operand is register");
5355
5356 bool IsXReg =
5357 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5358 Operands[1]->getReg());
5359
5360 MCContext& Ctx = getContext();
5361 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5362 // If the op is an immediate that can fit into a mov, replace ldr with mov.
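// Illustrative example (not part of the original source):
//   ldr x0, =0x10000
// is rewritten below as "movz x0, #1, lsl #16", while a value that does not
// fit a single movz is placed in the constant pool instead.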
5363 if (isa<MCConstantExpr>(SubExprVal)) {
5364 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5365 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5366 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5367 ShiftAmt += 16;
5368 Imm >>= 16;
5369 }
5370 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5371 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5372 Operands.push_back(AArch64Operand::CreateImm(
5373 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5374 if (ShiftAmt)
5375 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5376 ShiftAmt, true, S, E, Ctx));
5377 return false;
5378 }
5379 APInt Simm = APInt(64, Imm << ShiftAmt);
5380 // Check if the immediate fits as an unsigned or signed 32-bit int for W regs.
5381 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5382 return Error(Loc, "Immediate too large for register");
5383 }
5384 // If it is a label or an imm that cannot fit in a movz, put it into CP.
5385 const MCExpr *CPLoc =
5386 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5387 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5388 return false;
5389 }
5390 }
5391}
5392
5393bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5394 const MCExpr *Expr = nullptr;
5395 SMLoc L = getLoc();
5396 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5397 return true;
5398 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5399 if (check(!Value, L, "expected constant expression"))
5400 return true;
5401 Out = Value->getValue();
5402 return false;
5403}
5404
5405bool AArch64AsmParser::parseComma() {
5406 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5407 return true;
5408 // Eat the comma
5409 Lex();
5410 return false;
5411}
5412
5413bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5414 unsigned First, unsigned Last) {
5415 MCRegister Reg;
5416 SMLoc Start, End;
5417 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5418 return true;
5419
5420 // Special handling for FP and LR; they aren't linearly after x28 in
5421 // the registers enum.
5422 unsigned RangeEnd = Last;
5423 if (Base == AArch64::X0) {
5424 if (Last == AArch64::FP) {
5425 RangeEnd = AArch64::X28;
5426 if (Reg == AArch64::FP) {
5427 Out = 29;
5428 return false;
5429 }
5430 }
5431 if (Last == AArch64::LR) {
5432 RangeEnd = AArch64::X28;
5433 if (Reg == AArch64::FP) {
5434 Out = 29;
5435 return false;
5436 } else if (Reg == AArch64::LR) {
5437 Out = 30;
5438 return false;
5439 }
5440 }
5441 }
5442
5443 if (check(Reg < First || Reg > RangeEnd, Start,
5444 Twine("expected register in range ") +
5445 AArch64InstPrinter::getRegisterName(First) + " to " +
5446 AArch64InstPrinter::getRegisterName(Last)))
5447 return true;
5448 Out = Reg - Base;
5449 return false;
5450}
5451
5452bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5453 const MCParsedAsmOperand &Op2) const {
5454 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5455 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5456
5457 if (AOp1.isVectorList() && AOp2.isVectorList())
5458 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5459 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5460 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5461
5462 if (!AOp1.isReg() || !AOp2.isReg())
5463 return false;
5464
5465 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5466 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5467 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5468
5469 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5470 "Testing equality of non-scalar registers not supported");
5471
5472 // Check if the registers match their sub/super register classes.
5473 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5474 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5475 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5476 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5477 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5478 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5479 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5480 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5481
5482 return false;
5483}
5484
5485/// Parse an AArch64 instruction mnemonic followed by its operands.
5486bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5487 StringRef Name, SMLoc NameLoc,
5488 OperandVector &Operands) {
5489 Name = StringSwitch<StringRef>(Name.lower())
5490 .Case("beq", "b.eq")
5491 .Case("bne", "b.ne")
5492 .Case("bhs", "b.hs")
5493 .Case("bcs", "b.cs")
5494 .Case("blo", "b.lo")
5495 .Case("bcc", "b.cc")
5496 .Case("bmi", "b.mi")
5497 .Case("bpl", "b.pl")
5498 .Case("bvs", "b.vs")
5499 .Case("bvc", "b.vc")
5500 .Case("bhi", "b.hi")
5501 .Case("bls", "b.ls")
5502 .Case("bge", "b.ge")
5503 .Case("blt", "b.lt")
5504 .Case("bgt", "b.gt")
5505 .Case("ble", "b.le")
5506 .Case("bal", "b.al")
5507 .Case("bnv", "b.nv")
5508 .Default(Name);
5509
5510 // First check for the AArch64-specific .req directive.
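// e.g. (illustrative) "fp .req x29" registers an alias that later operands may
// use in place of the register name.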
5511 if (getTok().is(AsmToken::Identifier) &&
5512 getTok().getIdentifier().lower() == ".req") {
5513 parseDirectiveReq(Name, NameLoc);
5514 // We always return 'error' for this, as we're done with this
5515 // statement and don't need to match the instruction.
5516 return true;
5517 }
5518
5519 // Create the leading tokens for the mnemonic, split by '.' characters.
5520 size_t Start = 0, Next = Name.find('.');
5521 StringRef Head = Name.slice(Start, Next);
5522
5523 // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
5524 // instructions are aliases for the SYS instruction.
5525 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5526 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
5527 Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
5528 return parseSysAlias(Head, NameLoc, Operands);
5529
5530 // GICR instructions are aliases for the SYSL instruction.
5531 if (Head == "gicr")
5532 return parseSyslAlias(Head, NameLoc, Operands);
5533
5534 // TLBIP instructions are aliases for the SYSP instruction.
5535 if (Head == "tlbip")
5536 return parseSyspAlias(Head, NameLoc, Operands);
5537
5538 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5539 Mnemonic = Head;
5540
5541 // Handle condition codes for a branch mnemonic
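// e.g. (illustrative) "b.eq" is split into the token "b", a "." suffix token
// and a condition-code operand for EQ.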
5542 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5543 Start = Next;
5544 Next = Name.find('.', Start + 1);
5545 Head = Name.slice(Start + 1, Next);
5546
5547 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5548 (Head.data() - Name.data()));
5549 std::string Suggestion;
5550 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5551 if (CC == AArch64CC::Invalid) {
5552 std::string Msg = "invalid condition code";
5553 if (!Suggestion.empty())
5554 Msg += ", did you mean " + Suggestion + "?";
5555 return Error(SuffixLoc, Msg);
5556 }
5557 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5558 /*IsSuffix=*/true));
5559 Operands.push_back(
5560 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5561 }
5562
5563 // Add the remaining tokens in the mnemonic.
5564 while (Next != StringRef::npos) {
5565 Start = Next;
5566 Next = Name.find('.', Start + 1);
5567 Head = Name.slice(Start, Next);
5568 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5569 (Head.data() - Name.data()) + 1);
5570 Operands.push_back(AArch64Operand::CreateToken(
5571 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5572 }
5573
5574 // Conditional compare instructions have a Condition Code operand, which needs
5575 // to be parsed and an immediate operand created.
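// e.g. (illustrative) in "ccmp x0, x1, #0, eq" the trailing "eq" is parsed as
// a condition code (the fourth operand) rather than as an identifier.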
5576 bool condCodeFourthOperand =
5577 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5578 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5579 Head == "csinc" || Head == "csinv" || Head == "csneg");
5580
5581 // These instructions are aliases to some of the conditional select
5582 // instructions. However, the condition code is inverted in the aliased
5583 // instruction.
5584 //
5585 // FIXME: Is this the correct way to handle these? Or should the parser
5586 // generate the aliased instructions directly?
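// e.g. (illustrative) "cset w0, eq" is an alias of "csinc w0, wzr, wzr, ne",
// so the condition code parsed here is inverted before matching.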
5587 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5588 bool condCodeThirdOperand =
5589 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5590
5591 // Read the remaining operands.
5592 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5593
5594 unsigned N = 1;
5595 do {
5596 // Parse and remember the operand.
5597 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5598 (N == 3 && condCodeThirdOperand) ||
5599 (N == 2 && condCodeSecondOperand),
5600 condCodeSecondOperand || condCodeThirdOperand)) {
5601 return true;
5602 }
5603
5604 // After successfully parsing some operands there are three special cases
5605 // to consider (i.e. notional operands not separated by commas). Two are
5606 // due to memory specifiers:
5607 // + An RBrac will end an address for load/store/prefetch
5608 // + An '!' will indicate a pre-indexed operation.
5609 //
5610 // And a further case is '}', which ends a group of tokens specifying the
5611 // SME accumulator array 'ZA' or tile vector, i.e.
5612 //
5613 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5614 //
5615 // It's someone else's responsibility to make sure these tokens are sane
5616 // in the given context!
5617
5618 if (parseOptionalToken(AsmToken::RBrac))
5619 Operands.push_back(
5620 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5621 if (parseOptionalToken(AsmToken::Exclaim))
5622 Operands.push_back(
5623 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5624 if (parseOptionalToken(AsmToken::RCurly))
5625 Operands.push_back(
5626 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5627
5628 ++N;
5629 } while (parseOptionalToken(AsmToken::Comma));
5630 }
5631
5632 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5633 return true;
5634
5635 return false;
5636}
5637
5638static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5639 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5640 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5641 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5642 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5643 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5644 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5645 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5646}
5647
5648// FIXME: This entire function is a giant hack to provide us with decent
5649// operand range validation/diagnostics until TableGen/MC can be extended
5650// to support autogeneration of this kind of validation.
5651bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5652 SmallVectorImpl<SMLoc> &Loc) {
5653 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5654 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5655
5656 // A prefix only applies to the instruction following it. Here we extract
5657 // prefix information for the next instruction before validating the current
5658 // one so that in the case of failure we don't erroneously continue using the
5659 // current prefix.
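// Illustrative example (not part of the original source): a predicated prefix
//   movprfx z0.s, p0/m, z1.s
// must be followed by a predicated SVE instruction that writes z0.s under the
// same governing predicate, e.g. "add z0.s, p0/m, z0.s, z2.s"; the checks
// below reject anything else as unpredictable.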
5660 PrefixInfo Prefix = NextPrefix;
5661 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5662
5663 // Before validating the instruction in isolation we run through the rules
5664 // applicable when it follows a prefix instruction.
5665 // NOTE: brk & hlt can be prefixed but require no additional validation.
5666 if (Prefix.isActive() &&
5667 (Inst.getOpcode() != AArch64::BRK) &&
5668 (Inst.getOpcode() != AArch64::HLT)) {
5669
5670 // Prefixed instructions must have a destructive operand.
5671 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5672 AArch64::NotDestructive)
5673 return Error(IDLoc, "instruction is unpredictable when following a"
5674 " movprfx, suggest replacing movprfx with mov");
5675
5676 // Destination operands must match.
5677 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5678 return Error(Loc[0], "instruction is unpredictable when following a"
5679 " movprfx writing to a different destination");
5680
5681 // Destination operand must not be used in any other location.
5682 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5683 if (Inst.getOperand(i).isReg() &&
5684 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5685 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5686 return Error(Loc[0], "instruction is unpredictable when following a"
5687 " movprfx and destination also used as non-destructive"
5688 " source");
5689 }
5690
5691 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5692 if (Prefix.isPredicated()) {
5693 int PgIdx = -1;
5694
5695 // Find the instruction's general predicate.
5696 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5697 if (Inst.getOperand(i).isReg() &&
5698 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5699 PgIdx = i;
5700 break;
5701 }
5702
5703 // Instruction must be predicated if the movprfx is predicated.
5704 if (PgIdx == -1 ||
5705 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5706 return Error(IDLoc, "instruction is unpredictable when following a"
5707 " predicated movprfx, suggest using unpredicated movprfx");
5708
5709 // Instruction must use same general predicate as the movprfx.
5710 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5711 return Error(IDLoc, "instruction is unpredictable when following a"
5712 " predicated movprfx using a different general predicate");
5713
5714 // Instruction element type must match the movprfx.
5715 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5716 return Error(IDLoc, "instruction is unpredictable when following a"
5717 " predicated movprfx with a different element size");
5718 }
5719 }
5720
5721 // On ARM64EC, only valid registers may be used. Warn against using
5722 // explicitly disallowed registers.
5723 if (IsWindowsArm64EC) {
5724 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5725 if (Inst.getOperand(i).isReg()) {
5726 MCRegister Reg = Inst.getOperand(i).getReg();
5727 // At this point, vector registers are matched to their
5728 // appropriately sized alias.
5729 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5730 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5731 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5732 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5733 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5734 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5735 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5736 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5737 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5738 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5739 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5740 " is disallowed on ARM64EC.");
5741 }
5742 }
5743 }
5744 }
5745
5746 // Check for indexed addressing modes where the base register is the same as
5747 // a destination/source register, or for pair loads where Rt == Rt2. All of
5748 // these are unpredictable.
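// e.g. (illustrative) "ldp x0, x0, [x1]" (Rt2 == Rt) and
// "ldr x0, [x0], #8" (writeback base equals the destination) are both
// diagnosed below.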
5749 switch (Inst.getOpcode()) {
5750 case AArch64::LDPSWpre:
5751 case AArch64::LDPWpost:
5752 case AArch64::LDPWpre:
5753 case AArch64::LDPXpost:
5754 case AArch64::LDPXpre: {
5755 MCRegister Rt = Inst.getOperand(1).getReg();
5756 MCRegister Rt2 = Inst.getOperand(2).getReg();
5757 MCRegister Rn = Inst.getOperand(3).getReg();
5758 if (RI->isSubRegisterEq(Rn, Rt))
5759 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5760 "is also a destination");
5761 if (RI->isSubRegisterEq(Rn, Rt2))
5762 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5763 "is also a destination");
5764 [[fallthrough]];
5765 }
5766 case AArch64::LDR_ZA:
5767 case AArch64::STR_ZA: {
5768 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5769 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5770 return Error(Loc[1],
5771 "unpredictable instruction, immediate and offset mismatch.");
5772 break;
5773 }
5774 case AArch64::LDPDi:
5775 case AArch64::LDPQi:
5776 case AArch64::LDPSi:
5777 case AArch64::LDPSWi:
5778 case AArch64::LDPWi:
5779 case AArch64::LDPXi: {
5780 MCRegister Rt = Inst.getOperand(0).getReg();
5781 MCRegister Rt2 = Inst.getOperand(1).getReg();
5782 if (Rt == Rt2)
5783 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5784 break;
5785 }
5786 case AArch64::LDPDpost:
5787 case AArch64::LDPDpre:
5788 case AArch64::LDPQpost:
5789 case AArch64::LDPQpre:
5790 case AArch64::LDPSpost:
5791 case AArch64::LDPSpre:
5792 case AArch64::LDPSWpost: {
5793 MCRegister Rt = Inst.getOperand(1).getReg();
5794 MCRegister Rt2 = Inst.getOperand(2).getReg();
5795 if (Rt == Rt2)
5796 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5797 break;
5798 }
5799 case AArch64::STPDpost:
5800 case AArch64::STPDpre:
5801 case AArch64::STPQpost:
5802 case AArch64::STPQpre:
5803 case AArch64::STPSpost:
5804 case AArch64::STPSpre:
5805 case AArch64::STPWpost:
5806 case AArch64::STPWpre:
5807 case AArch64::STPXpost:
5808 case AArch64::STPXpre: {
5809 MCRegister Rt = Inst.getOperand(1).getReg();
5810 MCRegister Rt2 = Inst.getOperand(2).getReg();
5811 MCRegister Rn = Inst.getOperand(3).getReg();
5812 if (RI->isSubRegisterEq(Rn, Rt))
5813 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5814 "is also a source");
5815 if (RI->isSubRegisterEq(Rn, Rt2))
5816 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5817 "is also a source");
5818 break;
5819 }
5820 case AArch64::LDRBBpre:
5821 case AArch64::LDRBpre:
5822 case AArch64::LDRHHpre:
5823 case AArch64::LDRHpre:
5824 case AArch64::LDRSBWpre:
5825 case AArch64::LDRSBXpre:
5826 case AArch64::LDRSHWpre:
5827 case AArch64::LDRSHXpre:
5828 case AArch64::LDRSWpre:
5829 case AArch64::LDRWpre:
5830 case AArch64::LDRXpre:
5831 case AArch64::LDRBBpost:
5832 case AArch64::LDRBpost:
5833 case AArch64::LDRHHpost:
5834 case AArch64::LDRHpost:
5835 case AArch64::LDRSBWpost:
5836 case AArch64::LDRSBXpost:
5837 case AArch64::LDRSHWpost:
5838 case AArch64::LDRSHXpost:
5839 case AArch64::LDRSWpost:
5840 case AArch64::LDRWpost:
5841 case AArch64::LDRXpost: {
5842 MCRegister Rt = Inst.getOperand(1).getReg();
5843 MCRegister Rn = Inst.getOperand(2).getReg();
5844 if (RI->isSubRegisterEq(Rn, Rt))
5845 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5846 "is also a source");
5847 break;
5848 }
5849 case AArch64::STRBBpost:
5850 case AArch64::STRBpost:
5851 case AArch64::STRHHpost:
5852 case AArch64::STRHpost:
5853 case AArch64::STRWpost:
5854 case AArch64::STRXpost:
5855 case AArch64::STRBBpre:
5856 case AArch64::STRBpre:
5857 case AArch64::STRHHpre:
5858 case AArch64::STRHpre:
5859 case AArch64::STRWpre:
5860 case AArch64::STRXpre: {
5861 MCRegister Rt = Inst.getOperand(1).getReg();
5862 MCRegister Rn = Inst.getOperand(2).getReg();
5863 if (RI->isSubRegisterEq(Rn, Rt))
5864 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5865 "is also a source");
5866 break;
5867 }
5868 case AArch64::STXRB:
5869 case AArch64::STXRH:
5870 case AArch64::STXRW:
5871 case AArch64::STXRX:
5872 case AArch64::STLXRB:
5873 case AArch64::STLXRH:
5874 case AArch64::STLXRW:
5875 case AArch64::STLXRX: {
5876 MCRegister Rs = Inst.getOperand(0).getReg();
5877 MCRegister Rt = Inst.getOperand(1).getReg();
5878 MCRegister Rn = Inst.getOperand(2).getReg();
5879 if (RI->isSubRegisterEq(Rt, Rs) ||
5880 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5881 return Error(Loc[0],
5882 "unpredictable STXR instruction, status is also a source");
5883 break;
5884 }
5885 case AArch64::STXPW:
5886 case AArch64::STXPX:
5887 case AArch64::STLXPW:
5888 case AArch64::STLXPX: {
5889 MCRegister Rs = Inst.getOperand(0).getReg();
5890 MCRegister Rt1 = Inst.getOperand(1).getReg();
5891 MCRegister Rt2 = Inst.getOperand(2).getReg();
5892 MCRegister Rn = Inst.getOperand(3).getReg();
5893 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5894 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5895 return Error(Loc[0],
5896 "unpredictable STXP instruction, status is also a source");
5897 break;
5898 }
5899 case AArch64::LDRABwriteback:
5900 case AArch64::LDRAAwriteback: {
5901 MCRegister Xt = Inst.getOperand(0).getReg();
5902 MCRegister Xn = Inst.getOperand(1).getReg();
5903 if (Xt == Xn)
5904 return Error(Loc[0],
5905 "unpredictable LDRA instruction, writeback base"
5906 " is also a destination");
5907 break;
5908 }
5909 }
5910
5911 // Check v8.8-A memops instructions.
5912 switch (Inst.getOpcode()) {
5913 case AArch64::CPYFP:
5914 case AArch64::CPYFPWN:
5915 case AArch64::CPYFPRN:
5916 case AArch64::CPYFPN:
5917 case AArch64::CPYFPWT:
5918 case AArch64::CPYFPWTWN:
5919 case AArch64::CPYFPWTRN:
5920 case AArch64::CPYFPWTN:
5921 case AArch64::CPYFPRT:
5922 case AArch64::CPYFPRTWN:
5923 case AArch64::CPYFPRTRN:
5924 case AArch64::CPYFPRTN:
5925 case AArch64::CPYFPT:
5926 case AArch64::CPYFPTWN:
5927 case AArch64::CPYFPTRN:
5928 case AArch64::CPYFPTN:
5929 case AArch64::CPYFM:
5930 case AArch64::CPYFMWN:
5931 case AArch64::CPYFMRN:
5932 case AArch64::CPYFMN:
5933 case AArch64::CPYFMWT:
5934 case AArch64::CPYFMWTWN:
5935 case AArch64::CPYFMWTRN:
5936 case AArch64::CPYFMWTN:
5937 case AArch64::CPYFMRT:
5938 case AArch64::CPYFMRTWN:
5939 case AArch64::CPYFMRTRN:
5940 case AArch64::CPYFMRTN:
5941 case AArch64::CPYFMT:
5942 case AArch64::CPYFMTWN:
5943 case AArch64::CPYFMTRN:
5944 case AArch64::CPYFMTN:
5945 case AArch64::CPYFE:
5946 case AArch64::CPYFEWN:
5947 case AArch64::CPYFERN:
5948 case AArch64::CPYFEN:
5949 case AArch64::CPYFEWT:
5950 case AArch64::CPYFEWTWN:
5951 case AArch64::CPYFEWTRN:
5952 case AArch64::CPYFEWTN:
5953 case AArch64::CPYFERT:
5954 case AArch64::CPYFERTWN:
5955 case AArch64::CPYFERTRN:
5956 case AArch64::CPYFERTN:
5957 case AArch64::CPYFET:
5958 case AArch64::CPYFETWN:
5959 case AArch64::CPYFETRN:
5960 case AArch64::CPYFETN:
5961 case AArch64::CPYP:
5962 case AArch64::CPYPWN:
5963 case AArch64::CPYPRN:
5964 case AArch64::CPYPN:
5965 case AArch64::CPYPWT:
5966 case AArch64::CPYPWTWN:
5967 case AArch64::CPYPWTRN:
5968 case AArch64::CPYPWTN:
5969 case AArch64::CPYPRT:
5970 case AArch64::CPYPRTWN:
5971 case AArch64::CPYPRTRN:
5972 case AArch64::CPYPRTN:
5973 case AArch64::CPYPT:
5974 case AArch64::CPYPTWN:
5975 case AArch64::CPYPTRN:
5976 case AArch64::CPYPTN:
5977 case AArch64::CPYM:
5978 case AArch64::CPYMWN:
5979 case AArch64::CPYMRN:
5980 case AArch64::CPYMN:
5981 case AArch64::CPYMWT:
5982 case AArch64::CPYMWTWN:
5983 case AArch64::CPYMWTRN:
5984 case AArch64::CPYMWTN:
5985 case AArch64::CPYMRT:
5986 case AArch64::CPYMRTWN:
5987 case AArch64::CPYMRTRN:
5988 case AArch64::CPYMRTN:
5989 case AArch64::CPYMT:
5990 case AArch64::CPYMTWN:
5991 case AArch64::CPYMTRN:
5992 case AArch64::CPYMTN:
5993 case AArch64::CPYE:
5994 case AArch64::CPYEWN:
5995 case AArch64::CPYERN:
5996 case AArch64::CPYEN:
5997 case AArch64::CPYEWT:
5998 case AArch64::CPYEWTWN:
5999 case AArch64::CPYEWTRN:
6000 case AArch64::CPYEWTN:
6001 case AArch64::CPYERT:
6002 case AArch64::CPYERTWN:
6003 case AArch64::CPYERTRN:
6004 case AArch64::CPYERTN:
6005 case AArch64::CPYET:
6006 case AArch64::CPYETWN:
6007 case AArch64::CPYETRN:
6008 case AArch64::CPYETN: {
6009 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
6010 MCRegister Xd = Inst.getOperand(3).getReg();
6011 MCRegister Xs = Inst.getOperand(4).getReg();
6012 MCRegister Xn = Inst.getOperand(5).getReg();
6013
6014 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6015 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6016 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6017
6018 if (Xd == Xs)
6019 return Error(Loc[0], "invalid CPY instruction, destination and source"
6020 " registers are the same");
6021 if (Xd == Xn)
6022 return Error(Loc[0], "invalid CPY instruction, destination and size"
6023 " registers are the same");
6024 if (Xs == Xn)
6025 return Error(Loc[0], "invalid CPY instruction, source and size"
6026 " registers are the same");
6027 break;
6028 }
6029 case AArch64::SETP:
6030 case AArch64::SETPT:
6031 case AArch64::SETPN:
6032 case AArch64::SETPTN:
6033 case AArch64::SETM:
6034 case AArch64::SETMT:
6035 case AArch64::SETMN:
6036 case AArch64::SETMTN:
6037 case AArch64::SETE:
6038 case AArch64::SETET:
6039 case AArch64::SETEN:
6040 case AArch64::SETETN:
6041 case AArch64::SETGP:
6042 case AArch64::SETGPT:
6043 case AArch64::SETGPN:
6044 case AArch64::SETGPTN:
6045 case AArch64::SETGM:
6046 case AArch64::SETGMT:
6047 case AArch64::SETGMN:
6048 case AArch64::SETGMTN:
6049 case AArch64::MOPSSETGE:
6050 case AArch64::MOPSSETGET:
6051 case AArch64::MOPSSETGEN:
6052 case AArch64::MOPSSETGETN: {
6053 // Xd_wb == op0, Xn_wb == op1
6054 MCRegister Xd = Inst.getOperand(2).getReg();
6055 MCRegister Xn = Inst.getOperand(3).getReg();
6056 MCRegister Xm = Inst.getOperand(4).getReg();
6057
6058 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6059 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6060
6061 if (Xd == Xn)
6062 return Error(Loc[0], "invalid SET instruction, destination and size"
6063 " registers are the same");
6064 if (Xd == Xm)
6065 return Error(Loc[0], "invalid SET instruction, destination and source"
6066 " registers are the same");
6067 if (Xn == Xm)
6068 return Error(Loc[0], "invalid SET instruction, source and size"
6069 " registers are the same");
6070 break;
6071 }
6072 case AArch64::SETGOP:
6073 case AArch64::SETGOPT:
6074 case AArch64::SETGOPN:
6075 case AArch64::SETGOPTN:
6076 case AArch64::SETGOM:
6077 case AArch64::SETGOMT:
6078 case AArch64::SETGOMN:
6079 case AArch64::SETGOMTN:
6080 case AArch64::SETGOE:
6081 case AArch64::SETGOET:
6082 case AArch64::SETGOEN:
6083 case AArch64::SETGOETN: {
6084 // Xd_wb == op0, Xn_wb == op1
6085 MCRegister Xd = Inst.getOperand(2).getReg();
6086 MCRegister Xn = Inst.getOperand(3).getReg();
6087
6088 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6089 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6090
6091 if (Xd == Xn)
6092 return Error(Loc[0], "invalid SET instruction, destination and size"
6093 " registers are the same");
6094 break;
6095 }
6096 }
6097
6098 // Now check immediate ranges. Separate from the above as there is overlap
6099 // in the instructions being checked and this keeps the nested conditionals
6100 // to a minimum.
6101 switch (Inst.getOpcode()) {
6102 case AArch64::ADDSWri:
6103 case AArch64::ADDSXri:
6104 case AArch64::ADDWri:
6105 case AArch64::ADDXri:
6106 case AArch64::SUBSWri:
6107 case AArch64::SUBSXri:
6108 case AArch64::SUBWri:
6109 case AArch64::SUBXri: {
6110 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6111 // some slight duplication here.
6112 if (Inst.getOperand(2).isExpr()) {
6113 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6114 AArch64::Specifier ELFSpec;
6115 AArch64::Specifier DarwinSpec;
6116 int64_t Addend;
6117 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6118
6119 // Only allow these with ADDXri.
6120 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6121 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6122 Inst.getOpcode() == AArch64::ADDXri)
6123 return false;
6124
6125 // Only allow these with ADDXri/ADDWri
6133 ELFSpec) &&
6134 (Inst.getOpcode() == AArch64::ADDXri ||
6135 Inst.getOpcode() == AArch64::ADDWri))
6136 return false;
6137
6138 // Don't allow symbol refs in the immediate field otherwise.
6139 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6140 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6141 // 'cmp w0, borked').
6142 return Error(Loc.back(), "invalid immediate expression");
6143 }
6144 // We don't validate more complex expressions here
6145 }
6146 return false;
6147 }
6148 default:
6149 return false;
6150 }
6151}
6152
6153 static std::string AArch64MnemonicSpellCheck(StringRef S,
6154 const FeatureBitset &FBS,
6155 unsigned VariantID = 0);
6156
6157bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6158 uint64_t ErrorInfo,
6159 OperandVector &Operands) {
6160 switch (ErrCode) {
6161 case Match_InvalidTiedOperand: {
6162 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6163 if (Op.isVectorList())
6164 return Error(Loc, "operand must match destination register list");
6165
6166 assert(Op.isReg() && "Unexpected operand type");
6167 switch (Op.getRegEqualityTy()) {
6168 case RegConstraintEqualityTy::EqualsSubReg:
6169 return Error(Loc, "operand must be 64-bit form of destination register");
6170 case RegConstraintEqualityTy::EqualsSuperReg:
6171 return Error(Loc, "operand must be 32-bit form of destination register");
6172 case RegConstraintEqualityTy::EqualsReg:
6173 return Error(Loc, "operand must match destination register");
6174 }
6175 llvm_unreachable("Unknown RegConstraintEqualityTy");
6176 }
6177 case Match_MissingFeature:
6178 return Error(Loc,
6179 "instruction requires a CPU feature not currently enabled");
6180 case Match_InvalidOperand:
6181 return Error(Loc, "invalid operand for instruction");
6182 case Match_InvalidSuffix:
6183 return Error(Loc, "invalid type suffix for instruction");
6184 case Match_InvalidCondCode:
6185 return Error(Loc, "expected AArch64 condition code");
6186 case Match_AddSubRegExtendSmall:
6187 return Error(Loc,
6188 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6189 case Match_AddSubRegExtendLarge:
6190 return Error(Loc,
6191 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6192 case Match_AddSubSecondSource:
6193 return Error(Loc,
6194 "expected compatible register, symbol or integer in range [0, 4095]");
6195 case Match_LogicalSecondSource:
6196 return Error(Loc, "expected compatible register or logical immediate");
6197 case Match_InvalidMovImm32Shift:
6198 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6199 case Match_InvalidMovImm64Shift:
6200 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6201 case Match_AddSubRegShift32:
6202 return Error(Loc,
6203 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6204 case Match_AddSubRegShift64:
6205 return Error(Loc,
6206 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6207 case Match_InvalidFPImm:
6208 return Error(Loc,
6209 "expected compatible register or floating-point constant");
6210 case Match_InvalidMemoryIndexedSImm6:
6211 return Error(Loc, "index must be an integer in range [-32, 31].");
6212 case Match_InvalidMemoryIndexedSImm5:
6213 return Error(Loc, "index must be an integer in range [-16, 15].");
6214 case Match_InvalidMemoryIndexed1SImm4:
6215 return Error(Loc, "index must be an integer in range [-8, 7].");
6216 case Match_InvalidMemoryIndexed2SImm4:
6217 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6218 case Match_InvalidMemoryIndexed3SImm4:
6219 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6220 case Match_InvalidMemoryIndexed4SImm4:
6221 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6222 case Match_InvalidMemoryIndexed16SImm4:
6223 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6224 case Match_InvalidMemoryIndexed32SImm4:
6225 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6226 case Match_InvalidMemoryIndexed1SImm6:
6227 return Error(Loc, "index must be an integer in range [-32, 31].");
6228 case Match_InvalidMemoryIndexedSImm8:
6229 return Error(Loc, "index must be an integer in range [-128, 127].");
6230 case Match_InvalidMemoryIndexedSImm9:
6231 return Error(Loc, "index must be an integer in range [-256, 255].");
6232 case Match_InvalidMemoryIndexed16SImm9:
6233 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6234 case Match_InvalidMemoryIndexed8SImm10:
6235 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6236 case Match_InvalidMemoryIndexed4SImm7:
6237 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6238 case Match_InvalidMemoryIndexed8SImm7:
6239 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6240 case Match_InvalidMemoryIndexed16SImm7:
6241 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6242 case Match_InvalidMemoryIndexed8UImm5:
6243 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6244 case Match_InvalidMemoryIndexed8UImm3:
6245 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6246 case Match_InvalidMemoryIndexed4UImm5:
6247 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6248 case Match_InvalidMemoryIndexed2UImm5:
6249 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6250 case Match_InvalidMemoryIndexed8UImm6:
6251 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6252 case Match_InvalidMemoryIndexed16UImm6:
6253 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6254 case Match_InvalidMemoryIndexed4UImm6:
6255 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6256 case Match_InvalidMemoryIndexed2UImm6:
6257 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6258 case Match_InvalidMemoryIndexed1UImm6:
6259 return Error(Loc, "index must be in range [0, 63].");
6260 case Match_InvalidMemoryWExtend8:
6261 return Error(Loc,
6262 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6263 case Match_InvalidMemoryWExtend16:
6264 return Error(Loc,
6265 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6266 case Match_InvalidMemoryWExtend32:
6267 return Error(Loc,
6268 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6269 case Match_InvalidMemoryWExtend64:
6270 return Error(Loc,
6271 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6272 case Match_InvalidMemoryWExtend128:
6273 return Error(Loc,
6274 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6275 case Match_InvalidMemoryXExtend8:
6276 return Error(Loc,
6277 "expected 'lsl' or 'sxtx' with optional shift of #0");
6278 case Match_InvalidMemoryXExtend16:
6279 return Error(Loc,
6280 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6281 case Match_InvalidMemoryXExtend32:
6282 return Error(Loc,
6283 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6284 case Match_InvalidMemoryXExtend64:
6285 return Error(Loc,
6286 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6287 case Match_InvalidMemoryXExtend128:
6288 return Error(Loc,
6289 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6290 case Match_InvalidMemoryIndexed1:
6291 return Error(Loc, "index must be an integer in range [0, 4095].");
6292 case Match_InvalidMemoryIndexed2:
6293 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6294 case Match_InvalidMemoryIndexed4:
6295 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6296 case Match_InvalidMemoryIndexed8:
6297 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6298 case Match_InvalidMemoryIndexed16:
6299 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6300 case Match_InvalidImm0_0:
6301 return Error(Loc, "immediate must be 0.");
6302 case Match_InvalidImm0_1:
6303 return Error(Loc, "immediate must be an integer in range [0, 1].");
6304 case Match_InvalidImm0_3:
6305 return Error(Loc, "immediate must be an integer in range [0, 3].");
6306 case Match_InvalidImm0_7:
6307 return Error(Loc, "immediate must be an integer in range [0, 7].");
6308 case Match_InvalidImm0_15:
6309 return Error(Loc, "immediate must be an integer in range [0, 15].");
6310 case Match_InvalidImm0_31:
6311 return Error(Loc, "immediate must be an integer in range [0, 31].");
6312 case Match_InvalidImm0_63:
6313 return Error(Loc, "immediate must be an integer in range [0, 63].");
6314 case Match_InvalidImm0_127:
6315 return Error(Loc, "immediate must be an integer in range [0, 127].");
6316 case Match_InvalidImm0_255:
6317 return Error(Loc, "immediate must be an integer in range [0, 255].");
6318 case Match_InvalidImm0_65535:
6319 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6320 case Match_InvalidImm1_8:
6321 return Error(Loc, "immediate must be an integer in range [1, 8].");
6322 case Match_InvalidImm1_16:
6323 return Error(Loc, "immediate must be an integer in range [1, 16].");
6324 case Match_InvalidImm1_32:
6325 return Error(Loc, "immediate must be an integer in range [1, 32].");
6326 case Match_InvalidImm1_64:
6327 return Error(Loc, "immediate must be an integer in range [1, 64].");
6328 case Match_InvalidImmM1_62:
6329 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6330 case Match_InvalidMemoryIndexedRange2UImm0:
6331 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6332 case Match_InvalidMemoryIndexedRange2UImm1:
6333 return Error(Loc, "vector select offset must be an immediate range of the "
6334 "form <immf>:<imml>, where the first "
6335 "immediate is a multiple of 2 in the range [0, 2], and "
6336 "the second immediate is immf + 1.");
6337 case Match_InvalidMemoryIndexedRange2UImm2:
6338 case Match_InvalidMemoryIndexedRange2UImm3:
6339 return Error(
6340 Loc,
6341 "vector select offset must be an immediate range of the form "
6342 "<immf>:<imml>, "
6343 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6344 "[0, 14] "
6345 "depending on the instruction, and the second immediate is immf + 1.");
6346 case Match_InvalidMemoryIndexedRange4UImm0:
6347 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6348 case Match_InvalidMemoryIndexedRange4UImm1:
6349 case Match_InvalidMemoryIndexedRange4UImm2:
6350 return Error(
6351 Loc,
6352 "vector select offset must be an immediate range of the form "
6353 "<immf>:<imml>, "
6354 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6355 "[0, 12] "
6356 "depending on the instruction, and the second immediate is immf + 3.");
6357 case Match_InvalidSVEAddSubImm8:
6358 return Error(Loc, "immediate must be an integer in range [0, 255]"
6359 " with a shift amount of 0");
6360 case Match_InvalidSVEAddSubImm16:
6361 case Match_InvalidSVEAddSubImm32:
6362 case Match_InvalidSVEAddSubImm64:
6363 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6364 "multiple of 256 in range [256, 65280]");
6365 case Match_InvalidSVECpyImm8:
6366 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6367 " with a shift amount of 0");
6368 case Match_InvalidSVECpyImm16:
6369 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6370 "multiple of 256 in range [-32768, 65280]");
6371 case Match_InvalidSVECpyImm32:
6372 case Match_InvalidSVECpyImm64:
6373 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6374 "multiple of 256 in range [-32768, 32512]");
6375 case Match_InvalidIndexRange0_0:
6376 return Error(Loc, "expected lane specifier '[0]'");
6377 case Match_InvalidIndexRange1_1:
6378 return Error(Loc, "expected lane specifier '[1]'");
6379 case Match_InvalidIndexRange0_15:
6380 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6381 case Match_InvalidIndexRange0_7:
6382 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6383 case Match_InvalidIndexRange0_3:
6384 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6385 case Match_InvalidIndexRange0_1:
6386 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6387 case Match_InvalidSVEIndexRange0_63:
6388 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6389 case Match_InvalidSVEIndexRange0_31:
6390 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6391 case Match_InvalidSVEIndexRange0_15:
6392 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6393 case Match_InvalidSVEIndexRange0_7:
6394 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6395 case Match_InvalidSVEIndexRange0_3:
6396 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6397 case Match_InvalidLabel:
6398 return Error(Loc, "expected label or encodable integer pc offset");
6399 case Match_MRS:
6400 return Error(Loc, "expected readable system register");
6401 case Match_MSR:
6402 case Match_InvalidSVCR:
6403 return Error(Loc, "expected writable system register or pstate");
6404 case Match_InvalidComplexRotationEven:
6405 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6406 case Match_InvalidComplexRotationOdd:
6407 return Error(Loc, "complex rotation must be 90 or 270.");
6408 case Match_MnemonicFail: {
6409 std::string Suggestion = AArch64MnemonicSpellCheck(
6410 ((AArch64Operand &)*Operands[0]).getToken(),
6411 ComputeAvailableFeatures(STI->getFeatureBits()));
6412 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6413 }
6414 case Match_InvalidGPR64shifted8:
6415 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6416 case Match_InvalidGPR64shifted16:
6417 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6418 case Match_InvalidGPR64shifted32:
6419 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6420 case Match_InvalidGPR64shifted64:
6421 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6422 case Match_InvalidGPR64shifted128:
6423 return Error(
6424 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6425 case Match_InvalidGPR64NoXZRshifted8:
6426 return Error(Loc, "register must be x0..x30 without shift");
6427 case Match_InvalidGPR64NoXZRshifted16:
6428 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6429 case Match_InvalidGPR64NoXZRshifted32:
6430 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6431 case Match_InvalidGPR64NoXZRshifted64:
6432 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6433 case Match_InvalidGPR64NoXZRshifted128:
6434 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6435 case Match_InvalidZPR32UXTW8:
6436 case Match_InvalidZPR32SXTW8:
6437 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6438 case Match_InvalidZPR32UXTW16:
6439 case Match_InvalidZPR32SXTW16:
6440 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6441 case Match_InvalidZPR32UXTW32:
6442 case Match_InvalidZPR32SXTW32:
6443 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6444 case Match_InvalidZPR32UXTW64:
6445 case Match_InvalidZPR32SXTW64:
6446 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6447 case Match_InvalidZPR64UXTW8:
6448 case Match_InvalidZPR64SXTW8:
6449 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6450 case Match_InvalidZPR64UXTW16:
6451 case Match_InvalidZPR64SXTW16:
6452 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6453 case Match_InvalidZPR64UXTW32:
6454 case Match_InvalidZPR64SXTW32:
6455 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6456 case Match_InvalidZPR64UXTW64:
6457 case Match_InvalidZPR64SXTW64:
6458 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6459 case Match_InvalidZPR32LSL8:
6460 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6461 case Match_InvalidZPR32LSL16:
6462 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6463 case Match_InvalidZPR32LSL32:
6464 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6465 case Match_InvalidZPR32LSL64:
6466 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6467 case Match_InvalidZPR64LSL8:
6468 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6469 case Match_InvalidZPR64LSL16:
6470 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6471 case Match_InvalidZPR64LSL32:
6472 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6473 case Match_InvalidZPR64LSL64:
6474 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6475 case Match_InvalidZPR0:
6476 return Error(Loc, "expected register without element width suffix");
6477 case Match_InvalidZPR8:
6478 case Match_InvalidZPR16:
6479 case Match_InvalidZPR32:
6480 case Match_InvalidZPR64:
6481 case Match_InvalidZPR128:
6482 return Error(Loc, "invalid element width");
6483 case Match_InvalidZPR_3b8:
6484 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6485 case Match_InvalidZPR_3b16:
6486 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6487 case Match_InvalidZPR_3b32:
6488 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6489 case Match_InvalidZPR_4b8:
6490 return Error(Loc,
6491 "Invalid restricted vector register, expected z0.b..z15.b");
6492 case Match_InvalidZPR_4b16:
6493 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6494 case Match_InvalidZPR_4b32:
6495 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6496 case Match_InvalidZPR_4b64:
6497 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6498 case Match_InvalidZPRMul2_Lo8:
6499 return Error(Loc, "Invalid restricted vector register, expected even "
6500 "register in z0.b..z14.b");
6501 case Match_InvalidZPRMul2_Hi8:
6502 return Error(Loc, "Invalid restricted vector register, expected even "
6503 "register in z16.b..z30.b");
6504 case Match_InvalidZPRMul2_Lo16:
6505 return Error(Loc, "Invalid restricted vector register, expected even "
6506 "register in z0.h..z14.h");
6507 case Match_InvalidZPRMul2_Hi16:
6508 return Error(Loc, "Invalid restricted vector register, expected even "
6509 "register in z16.h..z30.h");
6510 case Match_InvalidZPRMul2_Lo32:
6511 return Error(Loc, "Invalid restricted vector register, expected even "
6512 "register in z0.s..z14.s");
6513 case Match_InvalidZPRMul2_Hi32:
6514 return Error(Loc, "Invalid restricted vector register, expected even "
6515 "register in z16.s..z30.s");
6516 case Match_InvalidZPRMul2_Lo64:
6517 return Error(Loc, "Invalid restricted vector register, expected even "
6518 "register in z0.d..z14.d");
6519 case Match_InvalidZPRMul2_Hi64:
6520 return Error(Loc, "Invalid restricted vector register, expected even "
6521 "register in z16.d..z30.d");
6522 case Match_InvalidZPR_K0:
6523 return Error(Loc, "invalid restricted vector register, expected register "
6524 "in z20..z23 or z28..z31");
6525 case Match_InvalidSVEPattern:
6526 return Error(Loc, "invalid predicate pattern");
6527 case Match_InvalidSVEPPRorPNRAnyReg:
6528 case Match_InvalidSVEPPRorPNRBReg:
6529 case Match_InvalidSVEPredicateAnyReg:
6530 case Match_InvalidSVEPredicateBReg:
6531 case Match_InvalidSVEPredicateHReg:
6532 case Match_InvalidSVEPredicateSReg:
6533 case Match_InvalidSVEPredicateDReg:
6534 return Error(Loc, "invalid predicate register.");
6535 case Match_InvalidSVEPredicate3bAnyReg:
6536 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6537 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6538 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6539 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6540 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6541 return Error(Loc, "Invalid predicate register, expected PN in range "
6542 "pn8..pn15 with element suffix.");
6543 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6544 return Error(Loc, "invalid restricted predicate-as-counter register "
6545 "expected pn8..pn15");
6546 case Match_InvalidSVEPNPredicateBReg:
6547 case Match_InvalidSVEPNPredicateHReg:
6548 case Match_InvalidSVEPNPredicateSReg:
6549 case Match_InvalidSVEPNPredicateDReg:
6550 return Error(Loc, "Invalid predicate register, expected PN in range "
6551 "pn0..pn15 with element suffix.");
6552 case Match_InvalidSVEVecLenSpecifier:
6553 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6554 case Match_InvalidSVEPredicateListMul2x8:
6555 case Match_InvalidSVEPredicateListMul2x16:
6556 case Match_InvalidSVEPredicateListMul2x32:
6557 case Match_InvalidSVEPredicateListMul2x64:
6558 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6559 "predicate registers, where the first vector is a multiple of 2 "
6560 "and with correct element type");
6561 case Match_InvalidSVEExactFPImmOperandHalfOne:
6562 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6563 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6564 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6565 case Match_InvalidSVEExactFPImmOperandZeroOne:
6566 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6567 case Match_InvalidMatrixTileVectorH8:
6568 case Match_InvalidMatrixTileVectorV8:
6569 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6570 case Match_InvalidMatrixTileVectorH16:
6571 case Match_InvalidMatrixTileVectorV16:
6572 return Error(Loc,
6573 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6574 case Match_InvalidMatrixTileVectorH32:
6575 case Match_InvalidMatrixTileVectorV32:
6576 return Error(Loc,
6577 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6578 case Match_InvalidMatrixTileVectorH64:
6579 case Match_InvalidMatrixTileVectorV64:
6580 return Error(Loc,
6581 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6582 case Match_InvalidMatrixTileVectorH128:
6583 case Match_InvalidMatrixTileVectorV128:
6584 return Error(Loc,
6585 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6586 case Match_InvalidMatrixTile16:
6587 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6588 case Match_InvalidMatrixTile32:
6589 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6590 case Match_InvalidMatrixTile64:
6591 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6592 case Match_InvalidMatrix:
6593 return Error(Loc, "invalid matrix operand, expected za");
6594 case Match_InvalidMatrix8:
6595 return Error(Loc, "invalid matrix operand, expected suffix .b");
6596 case Match_InvalidMatrix16:
6597 return Error(Loc, "invalid matrix operand, expected suffix .h");
6598 case Match_InvalidMatrix32:
6599 return Error(Loc, "invalid matrix operand, expected suffix .s");
6600 case Match_InvalidMatrix64:
6601 return Error(Loc, "invalid matrix operand, expected suffix .d");
6602 case Match_InvalidMatrixIndexGPR32_12_15:
6603 return Error(Loc, "operand must be a register in range [w12, w15]");
6604 case Match_InvalidMatrixIndexGPR32_8_11:
6605 return Error(Loc, "operand must be a register in range [w8, w11]");
6606 case Match_InvalidSVEVectorList2x8Mul2:
6607 case Match_InvalidSVEVectorList2x16Mul2:
6608 case Match_InvalidSVEVectorList2x32Mul2:
6609 case Match_InvalidSVEVectorList2x64Mul2:
6610 case Match_InvalidSVEVectorList2x128Mul2:
6611 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6612 "SVE vectors, where the first vector is a multiple of 2 "
6613 "and with matching element types");
6614 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6615 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6616 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6617 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6618 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6619 "SVE vectors in the range z0-z14, where the first vector "
6620 "is a multiple of 2 "
6621 "and with matching element types");
6622 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6623 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6624 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6625 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6626 return Error(Loc,
6627 "Invalid vector list, expected list with 2 consecutive "
6628 "SVE vectors in the range z16-z30, where the first vector "
6629 "is a multiple of 2 "
6630 "and with matching element types");
6631 case Match_InvalidSVEVectorList4x8Mul4:
6632 case Match_InvalidSVEVectorList4x16Mul4:
6633 case Match_InvalidSVEVectorList4x32Mul4:
6634 case Match_InvalidSVEVectorList4x64Mul4:
6635 case Match_InvalidSVEVectorList4x128Mul4:
6636 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6637 "SVE vectors, where the first vector is a multiple of 4 "
6638 "and with matching element types");
6639 case Match_InvalidLookupTable:
6640 return Error(Loc, "Invalid lookup table, expected zt0");
6641 case Match_InvalidSVEVectorListStrided2x8:
6642 case Match_InvalidSVEVectorListStrided2x16:
6643 case Match_InvalidSVEVectorListStrided2x32:
6644 case Match_InvalidSVEVectorListStrided2x64:
6645 return Error(
6646 Loc,
6647 "Invalid vector list, expected list with each SVE vector in the list "
6648 "8 registers apart, and the first register in the range [z0, z7] or "
6649 "[z16, z23] and with correct element type");
6650 case Match_InvalidSVEVectorListStrided4x8:
6651 case Match_InvalidSVEVectorListStrided4x16:
6652 case Match_InvalidSVEVectorListStrided4x32:
6653 case Match_InvalidSVEVectorListStrided4x64:
6654 return Error(
6655 Loc,
6656 "Invalid vector list, expected list with each SVE vector in the list "
6657 "4 registers apart, and the first register in the range [z0, z3] or "
6658 "[z16, z19] and with correct element type");
6659 case Match_AddSubLSLImm3ShiftLarge:
6660 return Error(Loc,
6661 "expected 'lsl' with optional integer in range [0, 7]");
6662 default:
6663 llvm_unreachable("unexpected error code!");
6664 }
6665}
6666
6667static const char *getSubtargetFeatureName(uint64_t Val);
6668
6669bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6670 OperandVector &Operands,
6671 MCStreamer &Out,
6672 uint64_t &ErrorInfo,
6673 bool MatchingInlineAsm) {
6674 assert(!Operands.empty() && "Unexpected empty operand list!");
6675 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6676 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6677
6678 StringRef Tok = Op.getToken();
6679 unsigned NumOperands = Operands.size();
6680
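// A "lsl" written with an immediate shift is folded into UBFM here. For a
// 32-bit destination, "lsl w0, w1, #4" becomes "ubfm w0, w1, #28, #27",
// since (32 - 4) & 0x1f = 28 and 31 - 4 = 27; the 64-bit case below uses the
// analogous 0x3f mask and 63 - shift.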
6681 if (NumOperands == 4 && Tok == "lsl") {
6682 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6683 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6684 if (Op2.isScalarReg() && Op3.isImm()) {
6685 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6686 if (Op3CE) {
6687 uint64_t Op3Val = Op3CE->getValue();
6688 uint64_t NewOp3Val = 0;
6689 uint64_t NewOp4Val = 0;
6690 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6691 Op2.getReg())) {
6692 NewOp3Val = (32 - Op3Val) & 0x1f;
6693 NewOp4Val = 31 - Op3Val;
6694 } else {
6695 NewOp3Val = (64 - Op3Val) & 0x3f;
6696 NewOp4Val = 63 - Op3Val;
6697 }
6698
6699 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6700 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6701
6702 Operands[0] =
6703 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6704 Operands.push_back(AArch64Operand::CreateImm(
6705 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6706 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6707 Op3.getEndLoc(), getContext());
6708 }
6709 }
6710 } else if (NumOperands == 4 && Tok == "bfc") {
6711 // FIXME: Horrible hack to handle BFC->BFM alias.
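// For example, "bfc x0, #8, #16" becomes "bfm x0, xzr, #56, #15":
// ImmR = (64 - 8) & 0x3f = 56 and ImmS = 16 - 1 = 15.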
6712 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6713 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6714 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6715
6716 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6717 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6718 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6719
6720 if (LSBCE && WidthCE) {
6721 uint64_t LSB = LSBCE->getValue();
6722 uint64_t Width = WidthCE->getValue();
6723
6724 uint64_t RegWidth = 0;
6725 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6726 Op1.getReg()))
6727 RegWidth = 64;
6728 else
6729 RegWidth = 32;
6730
6731 if (LSB >= RegWidth)
6732 return Error(LSBOp.getStartLoc(),
6733 "expected integer in range [0, 31]");
6734 if (Width < 1 || Width > RegWidth)
6735 return Error(WidthOp.getStartLoc(),
6736 "expected integer in range [1, 32]");
6737
6738 uint64_t ImmR = 0;
6739 if (RegWidth == 32)
6740 ImmR = (32 - LSB) & 0x1f;
6741 else
6742 ImmR = (64 - LSB) & 0x3f;
6743
6744 uint64_t ImmS = Width - 1;
6745
6746 if (ImmR != 0 && ImmS >= ImmR)
6747 return Error(WidthOp.getStartLoc(),
6748 "requested insert overflows register");
6749
6750 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6751 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6752 Operands[0] =
6753 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6754 Operands[2] = AArch64Operand::CreateReg(
6755 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6756 SMLoc(), SMLoc(), getContext());
6757 Operands[3] = AArch64Operand::CreateImm(
6758 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6759 Operands.emplace_back(
6760 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6761 WidthOp.getEndLoc(), getContext()));
6762 }
6763 }
6764 } else if (NumOperands == 5) {
6765 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6766 // UBFIZ -> UBFM aliases.
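// For example, "bfi x0, x1, #8, #16" becomes "bfm x0, x1, #56, #15": the lsb
// is rewritten as (64 - 8) & 0x3f = 56 and the width as 16 - 1 = 15.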
6767 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6768 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6769 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6770 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6771
6772 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6773 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6774 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6775
6776 if (Op3CE && Op4CE) {
6777 uint64_t Op3Val = Op3CE->getValue();
6778 uint64_t Op4Val = Op4CE->getValue();
6779
6780 uint64_t RegWidth = 0;
6781 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6782 Op1.getReg()))
6783 RegWidth = 64;
6784 else
6785 RegWidth = 32;
6786
6787 if (Op3Val >= RegWidth)
6788 return Error(Op3.getStartLoc(),
6789 "expected integer in range [0, 31]");
6790 if (Op4Val < 1 || Op4Val > RegWidth)
6791 return Error(Op4.getStartLoc(),
6792 "expected integer in range [1, 32]");
6793
6794 uint64_t NewOp3Val = 0;
6795 if (RegWidth == 32)
6796 NewOp3Val = (32 - Op3Val) & 0x1f;
6797 else
6798 NewOp3Val = (64 - Op3Val) & 0x3f;
6799
6800 uint64_t NewOp4Val = Op4Val - 1;
6801
6802 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6803 return Error(Op4.getStartLoc(),
6804 "requested insert overflows register");
6805
6806 const MCExpr *NewOp3 =
6807 MCConstantExpr::create(NewOp3Val, getContext());
6808 const MCExpr *NewOp4 =
6809 MCConstantExpr::create(NewOp4Val, getContext());
6810 Operands[3] = AArch64Operand::CreateImm(
6811 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6812 Operands[4] = AArch64Operand::CreateImm(
6813 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6814 if (Tok == "bfi")
6815 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6816 getContext());
6817 else if (Tok == "sbfiz")
6818 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6819 getContext());
6820 else if (Tok == "ubfiz")
6821 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6822 getContext());
6823 else
6824 llvm_unreachable("No valid mnemonic for alias?");
6825 }
6826 }
6827
6828 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6829 // UBFX -> UBFM aliases.
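// For example, "ubfx x0, x1, #8, #16" becomes "ubfm x0, x1, #8, #23": only
// the width operand changes, to lsb + width - 1 = 23.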
6830 } else if (NumOperands == 5 &&
6831 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6832 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6833 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6834 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6835
6836 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6837 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6838 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6839
6840 if (Op3CE && Op4CE) {
6841 uint64_t Op3Val = Op3CE->getValue();
6842 uint64_t Op4Val = Op4CE->getValue();
6843
6844 uint64_t RegWidth = 0;
6845 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6846 Op1.getReg()))
6847 RegWidth = 64;
6848 else
6849 RegWidth = 32;
6850
6851 if (Op3Val >= RegWidth)
6852 return Error(Op3.getStartLoc(),
6853 "expected integer in range [0, 31]");
6854 if (Op4Val < 1 || Op4Val > RegWidth)
6855 return Error(Op4.getStartLoc(),
6856 "expected integer in range [1, 32]");
6857
6858 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6859
6860 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6861 return Error(Op4.getStartLoc(),
6862 "requested extract overflows register");
6863
6864 const MCExpr *NewOp4 =
6865 MCConstantExpr::create(NewOp4Val, getContext());
6866 Operands[4] = AArch64Operand::CreateImm(
6867 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6868 if (Tok == "bfxil")
6869 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6870 getContext());
6871 else if (Tok == "sbfx")
6872 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6873 getContext());
6874 else if (Tok == "ubfx")
6875 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6876 getContext());
6877 else
6878 llvm_unreachable("No valid mnemonic for alias?");
6879 }
6880 }
6881 }
6882 }
6883
6884 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6885 // instruction for FP registers correctly in some rare circumstances. Convert
6886 // it to a safe instruction and warn (because silently changing someone's
6887 // assembly is rude).
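// The rewrite is semantically equivalent: with an immediate of #0, both the
// .2d and the .16b forms fill the whole vector register with zeroes.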
6888 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6889 NumOperands == 4 && Tok == "movi") {
6890 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6891 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6892 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6893 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6894 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6895 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6896 if (Suffix.lower() == ".2d" &&
6897 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6898 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6899 " correctly on this CPU, converting to equivalent movi.16b");
6900 // Switch the suffix to .16b.
6901 unsigned Idx = Op1.isToken() ? 1 : 2;
6902 Operands[Idx] =
6903 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6904 }
6905 }
6906 }
6907
6908 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6909 // InstAlias can't quite handle this since the reg classes aren't
6910 // subclasses.
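// For example, for "sxtw x0, w1" the source operand w1 is promoted to x1
// below so that the GPR64-based alias pattern can match.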
6911 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6912 // The source register can be Wn here, but the matcher expects a
6913 // GPR64. Twiddle it here if necessary.
6914 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6915 if (Op.isScalarReg()) {
6916 MCRegister Reg = getXRegFromWReg(Op.getReg());
6917 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6918 Op.getStartLoc(), Op.getEndLoc(),
6919 getContext());
6920 }
6921 }
6922 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6923 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6924 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6925 if (Op.isScalarReg() &&
6926 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6927 Op.getReg())) {
6928 // The source register can be Wn here, but the matcher expects a
6929 // GPR64. Twiddle it here if necessary.
6930 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6931 if (Op.isScalarReg()) {
6932 MCRegister Reg = getXRegFromWReg(Op.getReg());
6933 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6934 Op.getStartLoc(),
6935 Op.getEndLoc(), getContext());
6936 }
6937 }
6938 }
6939 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6940 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6941 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6942 if (Op.isScalarReg() &&
6943 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6944 Op.getReg())) {
6945 // The destination register can be Xn here, but the matcher expects a
6946 // GPR32 (Wn), so twiddle it here if necessary.
6947 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6948 if (Op.isScalarReg()) {
6949 MCRegister Reg = getWRegFromXReg(Op.getReg());
6950 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6951 Op.getStartLoc(),
6952 Op.getEndLoc(), getContext());
6953 }
6954 }
6955 }
6956
6957 MCInst Inst;
6958 FeatureBitset MissingFeatures;
6959 // First try to match against the secondary set of tables containing the
6960 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6961 unsigned MatchResult =
6962 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6963 MatchingInlineAsm, 1);
6964
6965 // If that fails, try against the alternate table containing long-form NEON:
6966 // "fadd v0.2s, v1.2s, v2.2s"
6967 if (MatchResult != Match_Success) {
6968 // But first, save the short-form match result: we can use it in case the
6969 // long-form match also fails.
6970 auto ShortFormNEONErrorInfo = ErrorInfo;
6971 auto ShortFormNEONMatchResult = MatchResult;
6972 auto ShortFormNEONMissingFeatures = MissingFeatures;
6973
6974 MatchResult =
6975 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6976 MatchingInlineAsm, 0);
6977
6978 // Now, both matches failed, and the long-form match failed on the mnemonic
6979 // suffix token operand. The short-form match failure is probably more
6980 // relevant: use it instead.
6981 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6982 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6983 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6984 MatchResult = ShortFormNEONMatchResult;
6985 ErrorInfo = ShortFormNEONErrorInfo;
6986 MissingFeatures = ShortFormNEONMissingFeatures;
6987 }
6988 }
6989
6990 switch (MatchResult) {
6991 case Match_Success: {
6992 // Perform range checking and other semantic validations
6993 SmallVector<SMLoc, 8> OperandLocs;
6994 NumOperands = Operands.size();
6995 for (unsigned i = 1; i < NumOperands; ++i)
6996 OperandLocs.push_back(Operands[i]->getStartLoc());
6997 if (validateInstruction(Inst, IDLoc, OperandLocs))
6998 return true;
6999
7000 Inst.setLoc(IDLoc);
7001 Out.emitInstruction(Inst, getSTI());
7002 return false;
7003 }
7004 case Match_MissingFeature: {
7005 assert(MissingFeatures.any() && "Unknown missing feature!");
7006 // Special case the error message for the very common case where only
7007 // a single subtarget feature is missing (neon, e.g.).
7008 std::string Msg = "instruction requires:";
7009 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7010 if (MissingFeatures[i]) {
7011 Msg += " ";
7012 Msg += getSubtargetFeatureName(i);
7013 }
7014 }
7015 return Error(IDLoc, Msg);
7016 }
7017 case Match_MnemonicFail:
7018 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
7019 case Match_InvalidOperand: {
7020 SMLoc ErrorLoc = IDLoc;
7021
7022 if (ErrorInfo != ~0ULL) {
7023 if (ErrorInfo >= Operands.size())
7024 return Error(IDLoc, "too few operands for instruction",
7025 SMRange(IDLoc, getTok().getLoc()));
7026
7027 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7028 if (ErrorLoc == SMLoc())
7029 ErrorLoc = IDLoc;
7030 }
7031 // If the match failed on a suffix token operand, tweak the diagnostic
7032 // accordingly.
7033 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7034 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7035 MatchResult = Match_InvalidSuffix;
7036
7037 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7038 }
7039 case Match_InvalidTiedOperand:
7040 case Match_InvalidMemoryIndexed1:
7041 case Match_InvalidMemoryIndexed2:
7042 case Match_InvalidMemoryIndexed4:
7043 case Match_InvalidMemoryIndexed8:
7044 case Match_InvalidMemoryIndexed16:
7045 case Match_InvalidCondCode:
7046 case Match_AddSubLSLImm3ShiftLarge:
7047 case Match_AddSubRegExtendSmall:
7048 case Match_AddSubRegExtendLarge:
7049 case Match_AddSubSecondSource:
7050 case Match_LogicalSecondSource:
7051 case Match_AddSubRegShift32:
7052 case Match_AddSubRegShift64:
7053 case Match_InvalidMovImm32Shift:
7054 case Match_InvalidMovImm64Shift:
7055 case Match_InvalidFPImm:
7056 case Match_InvalidMemoryWExtend8:
7057 case Match_InvalidMemoryWExtend16:
7058 case Match_InvalidMemoryWExtend32:
7059 case Match_InvalidMemoryWExtend64:
7060 case Match_InvalidMemoryWExtend128:
7061 case Match_InvalidMemoryXExtend8:
7062 case Match_InvalidMemoryXExtend16:
7063 case Match_InvalidMemoryXExtend32:
7064 case Match_InvalidMemoryXExtend64:
7065 case Match_InvalidMemoryXExtend128:
7066 case Match_InvalidMemoryIndexed1SImm4:
7067 case Match_InvalidMemoryIndexed2SImm4:
7068 case Match_InvalidMemoryIndexed3SImm4:
7069 case Match_InvalidMemoryIndexed4SImm4:
7070 case Match_InvalidMemoryIndexed1SImm6:
7071 case Match_InvalidMemoryIndexed16SImm4:
7072 case Match_InvalidMemoryIndexed32SImm4:
7073 case Match_InvalidMemoryIndexed4SImm7:
7074 case Match_InvalidMemoryIndexed8SImm7:
7075 case Match_InvalidMemoryIndexed16SImm7:
7076 case Match_InvalidMemoryIndexed8UImm5:
7077 case Match_InvalidMemoryIndexed8UImm3:
7078 case Match_InvalidMemoryIndexed4UImm5:
7079 case Match_InvalidMemoryIndexed2UImm5:
7080 case Match_InvalidMemoryIndexed1UImm6:
7081 case Match_InvalidMemoryIndexed2UImm6:
7082 case Match_InvalidMemoryIndexed4UImm6:
7083 case Match_InvalidMemoryIndexed8UImm6:
7084 case Match_InvalidMemoryIndexed16UImm6:
7085 case Match_InvalidMemoryIndexedSImm6:
7086 case Match_InvalidMemoryIndexedSImm5:
7087 case Match_InvalidMemoryIndexedSImm8:
7088 case Match_InvalidMemoryIndexedSImm9:
7089 case Match_InvalidMemoryIndexed16SImm9:
7090 case Match_InvalidMemoryIndexed8SImm10:
7091 case Match_InvalidImm0_0:
7092 case Match_InvalidImm0_1:
7093 case Match_InvalidImm0_3:
7094 case Match_InvalidImm0_7:
7095 case Match_InvalidImm0_15:
7096 case Match_InvalidImm0_31:
7097 case Match_InvalidImm0_63:
7098 case Match_InvalidImm0_127:
7099 case Match_InvalidImm0_255:
7100 case Match_InvalidImm0_65535:
7101 case Match_InvalidImm1_8:
7102 case Match_InvalidImm1_16:
7103 case Match_InvalidImm1_32:
7104 case Match_InvalidImm1_64:
7105 case Match_InvalidImmM1_62:
7106 case Match_InvalidMemoryIndexedRange2UImm0:
7107 case Match_InvalidMemoryIndexedRange2UImm1:
7108 case Match_InvalidMemoryIndexedRange2UImm2:
7109 case Match_InvalidMemoryIndexedRange2UImm3:
7110 case Match_InvalidMemoryIndexedRange4UImm0:
7111 case Match_InvalidMemoryIndexedRange4UImm1:
7112 case Match_InvalidMemoryIndexedRange4UImm2:
7113 case Match_InvalidSVEAddSubImm8:
7114 case Match_InvalidSVEAddSubImm16:
7115 case Match_InvalidSVEAddSubImm32:
7116 case Match_InvalidSVEAddSubImm64:
7117 case Match_InvalidSVECpyImm8:
7118 case Match_InvalidSVECpyImm16:
7119 case Match_InvalidSVECpyImm32:
7120 case Match_InvalidSVECpyImm64:
7121 case Match_InvalidIndexRange0_0:
7122 case Match_InvalidIndexRange1_1:
7123 case Match_InvalidIndexRange0_15:
7124 case Match_InvalidIndexRange0_7:
7125 case Match_InvalidIndexRange0_3:
7126 case Match_InvalidIndexRange0_1:
7127 case Match_InvalidSVEIndexRange0_63:
7128 case Match_InvalidSVEIndexRange0_31:
7129 case Match_InvalidSVEIndexRange0_15:
7130 case Match_InvalidSVEIndexRange0_7:
7131 case Match_InvalidSVEIndexRange0_3:
7132 case Match_InvalidLabel:
7133 case Match_InvalidComplexRotationEven:
7134 case Match_InvalidComplexRotationOdd:
7135 case Match_InvalidGPR64shifted8:
7136 case Match_InvalidGPR64shifted16:
7137 case Match_InvalidGPR64shifted32:
7138 case Match_InvalidGPR64shifted64:
7139 case Match_InvalidGPR64shifted128:
7140 case Match_InvalidGPR64NoXZRshifted8:
7141 case Match_InvalidGPR64NoXZRshifted16:
7142 case Match_InvalidGPR64NoXZRshifted32:
7143 case Match_InvalidGPR64NoXZRshifted64:
7144 case Match_InvalidGPR64NoXZRshifted128:
7145 case Match_InvalidZPR32UXTW8:
7146 case Match_InvalidZPR32UXTW16:
7147 case Match_InvalidZPR32UXTW32:
7148 case Match_InvalidZPR32UXTW64:
7149 case Match_InvalidZPR32SXTW8:
7150 case Match_InvalidZPR32SXTW16:
7151 case Match_InvalidZPR32SXTW32:
7152 case Match_InvalidZPR32SXTW64:
7153 case Match_InvalidZPR64UXTW8:
7154 case Match_InvalidZPR64SXTW8:
7155 case Match_InvalidZPR64UXTW16:
7156 case Match_InvalidZPR64SXTW16:
7157 case Match_InvalidZPR64UXTW32:
7158 case Match_InvalidZPR64SXTW32:
7159 case Match_InvalidZPR64UXTW64:
7160 case Match_InvalidZPR64SXTW64:
7161 case Match_InvalidZPR32LSL8:
7162 case Match_InvalidZPR32LSL16:
7163 case Match_InvalidZPR32LSL32:
7164 case Match_InvalidZPR32LSL64:
7165 case Match_InvalidZPR64LSL8:
7166 case Match_InvalidZPR64LSL16:
7167 case Match_InvalidZPR64LSL32:
7168 case Match_InvalidZPR64LSL64:
7169 case Match_InvalidZPR0:
7170 case Match_InvalidZPR8:
7171 case Match_InvalidZPR16:
7172 case Match_InvalidZPR32:
7173 case Match_InvalidZPR64:
7174 case Match_InvalidZPR128:
7175 case Match_InvalidZPR_3b8:
7176 case Match_InvalidZPR_3b16:
7177 case Match_InvalidZPR_3b32:
7178 case Match_InvalidZPR_4b8:
7179 case Match_InvalidZPR_4b16:
7180 case Match_InvalidZPR_4b32:
7181 case Match_InvalidZPR_4b64:
7182 case Match_InvalidSVEPPRorPNRAnyReg:
7183 case Match_InvalidSVEPPRorPNRBReg:
7184 case Match_InvalidSVEPredicateAnyReg:
7185 case Match_InvalidSVEPattern:
7186 case Match_InvalidSVEVecLenSpecifier:
7187 case Match_InvalidSVEPredicateBReg:
7188 case Match_InvalidSVEPredicateHReg:
7189 case Match_InvalidSVEPredicateSReg:
7190 case Match_InvalidSVEPredicateDReg:
7191 case Match_InvalidSVEPredicate3bAnyReg:
7192 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7193 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7194 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7195 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7196 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7197 case Match_InvalidSVEPNPredicateBReg:
7198 case Match_InvalidSVEPNPredicateHReg:
7199 case Match_InvalidSVEPNPredicateSReg:
7200 case Match_InvalidSVEPNPredicateDReg:
7201 case Match_InvalidSVEPredicateListMul2x8:
7202 case Match_InvalidSVEPredicateListMul2x16:
7203 case Match_InvalidSVEPredicateListMul2x32:
7204 case Match_InvalidSVEPredicateListMul2x64:
7205 case Match_InvalidSVEExactFPImmOperandHalfOne:
7206 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7207 case Match_InvalidSVEExactFPImmOperandZeroOne:
7208 case Match_InvalidMatrixTile16:
7209 case Match_InvalidMatrixTile32:
7210 case Match_InvalidMatrixTile64:
7211 case Match_InvalidMatrix:
7212 case Match_InvalidMatrix8:
7213 case Match_InvalidMatrix16:
7214 case Match_InvalidMatrix32:
7215 case Match_InvalidMatrix64:
7216 case Match_InvalidMatrixTileVectorH8:
7217 case Match_InvalidMatrixTileVectorH16:
7218 case Match_InvalidMatrixTileVectorH32:
7219 case Match_InvalidMatrixTileVectorH64:
7220 case Match_InvalidMatrixTileVectorH128:
7221 case Match_InvalidMatrixTileVectorV8:
7222 case Match_InvalidMatrixTileVectorV16:
7223 case Match_InvalidMatrixTileVectorV32:
7224 case Match_InvalidMatrixTileVectorV64:
7225 case Match_InvalidMatrixTileVectorV128:
7226 case Match_InvalidSVCR:
7227 case Match_InvalidMatrixIndexGPR32_12_15:
7228 case Match_InvalidMatrixIndexGPR32_8_11:
7229 case Match_InvalidLookupTable:
7230 case Match_InvalidZPRMul2_Lo8:
7231 case Match_InvalidZPRMul2_Hi8:
7232 case Match_InvalidZPRMul2_Lo16:
7233 case Match_InvalidZPRMul2_Hi16:
7234 case Match_InvalidZPRMul2_Lo32:
7235 case Match_InvalidZPRMul2_Hi32:
7236 case Match_InvalidZPRMul2_Lo64:
7237 case Match_InvalidZPRMul2_Hi64:
7238 case Match_InvalidZPR_K0:
7239 case Match_InvalidSVEVectorList2x8Mul2:
7240 case Match_InvalidSVEVectorList2x16Mul2:
7241 case Match_InvalidSVEVectorList2x32Mul2:
7242 case Match_InvalidSVEVectorList2x64Mul2:
7243 case Match_InvalidSVEVectorList2x128Mul2:
7244 case Match_InvalidSVEVectorList4x8Mul4:
7245 case Match_InvalidSVEVectorList4x16Mul4:
7246 case Match_InvalidSVEVectorList4x32Mul4:
7247 case Match_InvalidSVEVectorList4x64Mul4:
7248 case Match_InvalidSVEVectorList4x128Mul4:
7249 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7250 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7251 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7252 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7253 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7254 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7255 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7256 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7257 case Match_InvalidSVEVectorListStrided2x8:
7258 case Match_InvalidSVEVectorListStrided2x16:
7259 case Match_InvalidSVEVectorListStrided2x32:
7260 case Match_InvalidSVEVectorListStrided2x64:
7261 case Match_InvalidSVEVectorListStrided4x8:
7262 case Match_InvalidSVEVectorListStrided4x16:
7263 case Match_InvalidSVEVectorListStrided4x32:
7264 case Match_InvalidSVEVectorListStrided4x64:
7265 case Match_MSR:
7266 case Match_MRS: {
7267 if (ErrorInfo >= Operands.size())
7268 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7269 // Any time we get here, there's nothing fancy to do. Just get the
7270 // operand SMLoc and display the diagnostic.
7271 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7272 if (ErrorLoc == SMLoc())
7273 ErrorLoc = IDLoc;
7274 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7275 }
7276 }
7277
7278 llvm_unreachable("Implement any new match types added!");
7279}
7280
7281 /// ParseDirective parses the AArch64-specific directives
7282bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7283 const MCContext::Environment Format = getContext().getObjectFileType();
7284 bool IsMachO = Format == MCContext::IsMachO;
7285 bool IsCOFF = Format == MCContext::IsCOFF;
7286 bool IsELF = Format == MCContext::IsELF;
7287
7288 auto IDVal = DirectiveID.getIdentifier().lower();
7289 SMLoc Loc = DirectiveID.getLoc();
7290 if (IDVal == ".arch")
7291 parseDirectiveArch(Loc);
7292 else if (IDVal == ".cpu")
7293 parseDirectiveCPU(Loc);
7294 else if (IDVal == ".tlsdesccall")
7295 parseDirectiveTLSDescCall(Loc);
7296 else if (IDVal == ".ltorg" || IDVal == ".pool")
7297 parseDirectiveLtorg(Loc);
7298 else if (IDVal == ".unreq")
7299 parseDirectiveUnreq(Loc);
7300 else if (IDVal == ".inst")
7301 parseDirectiveInst(Loc);
7302 else if (IDVal == ".cfi_negate_ra_state")
7303 parseDirectiveCFINegateRAState();
7304 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7305 parseDirectiveCFINegateRAStateWithPC();
7306 else if (IDVal == ".cfi_b_key_frame")
7307 parseDirectiveCFIBKeyFrame();
7308 else if (IDVal == ".cfi_mte_tagged_frame")
7309 parseDirectiveCFIMTETaggedFrame();
7310 else if (IDVal == ".arch_extension")
7311 parseDirectiveArchExtension(Loc);
7312 else if (IDVal == ".variant_pcs")
7313 parseDirectiveVariantPCS(Loc);
7314 else if (IsMachO) {
7315 if (IDVal == MCLOHDirectiveName())
7316 parseDirectiveLOH(IDVal, Loc);
7317 else
7318 return true;
7319 } else if (IsCOFF) {
7320 if (IDVal == ".seh_stackalloc")
7321 parseDirectiveSEHAllocStack(Loc);
7322 else if (IDVal == ".seh_endprologue")
7323 parseDirectiveSEHPrologEnd(Loc);
7324 else if (IDVal == ".seh_save_r19r20_x")
7325 parseDirectiveSEHSaveR19R20X(Loc);
7326 else if (IDVal == ".seh_save_fplr")
7327 parseDirectiveSEHSaveFPLR(Loc);
7328 else if (IDVal == ".seh_save_fplr_x")
7329 parseDirectiveSEHSaveFPLRX(Loc);
7330 else if (IDVal == ".seh_save_reg")
7331 parseDirectiveSEHSaveReg(Loc);
7332 else if (IDVal == ".seh_save_reg_x")
7333 parseDirectiveSEHSaveRegX(Loc);
7334 else if (IDVal == ".seh_save_regp")
7335 parseDirectiveSEHSaveRegP(Loc);
7336 else if (IDVal == ".seh_save_regp_x")
7337 parseDirectiveSEHSaveRegPX(Loc);
7338 else if (IDVal == ".seh_save_lrpair")
7339 parseDirectiveSEHSaveLRPair(Loc);
7340 else if (IDVal == ".seh_save_freg")
7341 parseDirectiveSEHSaveFReg(Loc);
7342 else if (IDVal == ".seh_save_freg_x")
7343 parseDirectiveSEHSaveFRegX(Loc);
7344 else if (IDVal == ".seh_save_fregp")
7345 parseDirectiveSEHSaveFRegP(Loc);
7346 else if (IDVal == ".seh_save_fregp_x")
7347 parseDirectiveSEHSaveFRegPX(Loc);
7348 else if (IDVal == ".seh_set_fp")
7349 parseDirectiveSEHSetFP(Loc);
7350 else if (IDVal == ".seh_add_fp")
7351 parseDirectiveSEHAddFP(Loc);
7352 else if (IDVal == ".seh_nop")
7353 parseDirectiveSEHNop(Loc);
7354 else if (IDVal == ".seh_save_next")
7355 parseDirectiveSEHSaveNext(Loc);
7356 else if (IDVal == ".seh_startepilogue")
7357 parseDirectiveSEHEpilogStart(Loc);
7358 else if (IDVal == ".seh_endepilogue")
7359 parseDirectiveSEHEpilogEnd(Loc);
7360 else if (IDVal == ".seh_trap_frame")
7361 parseDirectiveSEHTrapFrame(Loc);
7362 else if (IDVal == ".seh_pushframe")
7363 parseDirectiveSEHMachineFrame(Loc);
7364 else if (IDVal == ".seh_context")
7365 parseDirectiveSEHContext(Loc);
7366 else if (IDVal == ".seh_ec_context")
7367 parseDirectiveSEHECContext(Loc);
7368 else if (IDVal == ".seh_clear_unwound_to_call")
7369 parseDirectiveSEHClearUnwoundToCall(Loc);
7370 else if (IDVal == ".seh_pac_sign_lr")
7371 parseDirectiveSEHPACSignLR(Loc);
7372 else if (IDVal == ".seh_save_any_reg")
7373 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7374 else if (IDVal == ".seh_save_any_reg_p")
7375 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7376 else if (IDVal == ".seh_save_any_reg_x")
7377 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7378 else if (IDVal == ".seh_save_any_reg_px")
7379 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7380 else if (IDVal == ".seh_allocz")
7381 parseDirectiveSEHAllocZ(Loc);
7382 else if (IDVal == ".seh_save_zreg")
7383 parseDirectiveSEHSaveZReg(Loc);
7384 else if (IDVal == ".seh_save_preg")
7385 parseDirectiveSEHSavePReg(Loc);
7386 else
7387 return true;
7388 } else if (IsELF) {
7389 if (IDVal == ".aeabi_subsection")
7390 parseDirectiveAeabiSubSectionHeader(Loc);
7391 else if (IDVal == ".aeabi_attribute")
7392 parseDirectiveAeabiAArch64Attr(Loc);
7393 else
7394 return true;
7395 } else
7396 return true;
7397 return false;
7398}
7399
7400static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7401 SmallVector<StringRef, 4> &RequestedExtensions) {
7402 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7403 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7404
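// For example, "+crypto" on armv8.2-a expands to "+sha2+aes", while on
// armv8.4-a and later it expands to "+sm4+sha3+sha2+aes"; "+nocrypto"
// removes the same sets.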
7405 if (!NoCrypto && Crypto) {
7406 // Map 'generic' (and others) to sha2 and aes, because
7407 // that was the traditional meaning of crypto.
7408 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7409 ArchInfo == AArch64::ARMV8_3A) {
7410 RequestedExtensions.push_back("sha2");
7411 RequestedExtensions.push_back("aes");
7412 }
7413 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7414 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7415 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7416 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7417 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7418 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7419 RequestedExtensions.push_back("sm4");
7420 RequestedExtensions.push_back("sha3");
7421 RequestedExtensions.push_back("sha2");
7422 RequestedExtensions.push_back("aes");
7423 }
7424 } else if (NoCrypto) {
7425 // Likewise map 'nocrypto' to nosha2 and noaes, since sha2 and aes
7426 // were the traditional meaning of crypto.
7427 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7428 ArchInfo == AArch64::ARMV8_3A) {
7429 RequestedExtensions.push_back("nosha2");
7430 RequestedExtensions.push_back("noaes");
7431 }
7432 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7433 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7434 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7435 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7436 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7437 ArchInfo == AArch64::ARMV9_4A) {
7438 RequestedExtensions.push_back("nosm4");
7439 RequestedExtensions.push_back("nosha3");
7440 RequestedExtensions.push_back("nosha2");
7441 RequestedExtensions.push_back("noaes");
7442 }
7443 }
7444}
7445
7446 static SMLoc incrementLoc(SMLoc L, int Offset) {
7447 return SMLoc::getFromPointer(L.getPointer() + Offset);
7448}
7449
7450/// parseDirectiveArch
7451/// ::= .arch token
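/// For example: .arch armv8.4-a+crypto (the extension list is optional).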
7452bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7453 SMLoc CurLoc = getLoc();
7454
7455 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7456 StringRef Arch, ExtensionString;
7457 std::tie(Arch, ExtensionString) = Name.split('+');
7458
7459 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7460 if (!ArchInfo)
7461 return Error(CurLoc, "unknown arch name");
7462
7463 if (parseToken(AsmToken::EndOfStatement))
7464 return true;
7465
7466 // Get the architecture and extension features.
7467 std::vector<StringRef> AArch64Features;
7468 AArch64Features.push_back(ArchInfo->ArchFeature);
7469 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7470
7471 MCSubtargetInfo &STI = copySTI();
7472 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7473 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7474 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7475
7476 SmallVector<StringRef, 4> RequestedExtensions;
7477 if (!ExtensionString.empty())
7478 ExtensionString.split(RequestedExtensions, '+');
7479
7480 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7481 CurLoc = incrementLoc(CurLoc, Arch.size());
7482
7483 for (auto Name : RequestedExtensions) {
7484 // Advance source location past '+'.
7485 CurLoc = incrementLoc(CurLoc, 1);
7486
7487 bool EnableFeature = !Name.consume_front_insensitive("no");
7488
7489 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7490 return Extension.Name == Name;
7491 });
7492
7493 if (It == std::end(ExtensionMap))
7494 return Error(CurLoc, "unsupported architectural extension: " + Name);
7495
7496 if (EnableFeature)
7497 STI.SetFeatureBitsTransitively(It->Features);
7498 else
7499 STI.ClearFeatureBitsTransitively(It->Features);
7500 CurLoc = incrementLoc(CurLoc, Name.size());
7501 }
7502 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7503 setAvailableFeatures(Features);
7504
7505 getTargetStreamer().emitDirectiveArch(Name);
7506 return false;
7507}
7508
7509/// parseDirectiveArchExtension
7510/// ::= .arch_extension [no]feature
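/// For example: .arch_extension crc enables the CRC instructions, and
/// .arch_extension nocrc disables them again.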
7511bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7512 SMLoc ExtLoc = getLoc();
7513
7514 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7515
7516 if (parseEOL())
7517 return true;
7518
7519 bool EnableFeature = true;
7520 StringRef Name = FullName;
7521 if (Name.starts_with_insensitive("no")) {
7522 EnableFeature = false;
7523 Name = Name.substr(2);
7524 }
7525
7526 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7527 return Extension.Name == Name;
7528 });
7529
7530 if (It == std::end(ExtensionMap))
7531 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7532
7533 MCSubtargetInfo &STI = copySTI();
7534 if (EnableFeature)
7535 STI.SetFeatureBitsTransitively(It->Features);
7536 else
7537 STI.ClearFeatureBitsTransitively(It->Features);
7538 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7539 setAvailableFeatures(Features);
7540
7541 getTargetStreamer().emitDirectiveArchExtension(FullName);
7542 return false;
7543}
7544
7545/// parseDirectiveCPU
7546/// ::= .cpu id
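/// For example: .cpu cortex-a57+crypto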
7547bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7548 SMLoc CurLoc = getLoc();
7549
7550 StringRef CPU, ExtensionString;
7551 std::tie(CPU, ExtensionString) =
7552 getParser().parseStringToEndOfStatement().trim().split('+');
7553
7554 if (parseToken(AsmToken::EndOfStatement))
7555 return true;
7556
7557 SmallVector<StringRef, 4> RequestedExtensions;
7558 if (!ExtensionString.empty())
7559 ExtensionString.split(RequestedExtensions, '+');
7560
7561 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7562 if (!CpuArch) {
7563 Error(CurLoc, "unknown CPU name");
7564 return false;
7565 }
7566 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7567
7568 MCSubtargetInfo &STI = copySTI();
7569 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7570 CurLoc = incrementLoc(CurLoc, CPU.size());
7571
7572 for (auto Name : RequestedExtensions) {
7573 // Advance source location past '+'.
7574 CurLoc = incrementLoc(CurLoc, 1);
7575
7576 bool EnableFeature = !Name.consume_front_insensitive("no");
7577
7578 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7579 return Extension.Name == Name;
7580 });
7581
7582 if (It == std::end(ExtensionMap))
7583 return Error(CurLoc, "unsupported architectural extension: " + Name);
7584
7585 if (EnableFeature)
7586 STI.SetFeatureBitsTransitively(It->Features);
7587 else
7588 STI.ClearFeatureBitsTransitively(It->Features);
7589 CurLoc = incrementLoc(CurLoc, Name.size());
7590 }
7591 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7592 setAvailableFeatures(Features);
7593 return false;
7594}
7595
7596/// parseDirectiveInst
7597/// ::= .inst opcode [, ...]
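/// For example: .inst 0xd503201f (the encoding of NOP); each value is
/// emitted as one instruction word.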
7598bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7599 if (getLexer().is(AsmToken::EndOfStatement))
7600 return Error(Loc, "expected expression following '.inst' directive");
7601
7602 auto parseOp = [&]() -> bool {
7603 SMLoc L = getLoc();
7604 const MCExpr *Expr = nullptr;
7605 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7606 return true;
7607 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7608 if (check(!Value, L, "expected constant expression"))
7609 return true;
7610 getTargetStreamer().emitInst(Value->getValue());
7611 return false;
7612 };
7613
7614 return parseMany(parseOp);
7615}
7616
7617// parseDirectiveTLSDescCall:
7618// ::= .tlsdesccall symbol
7619bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7620 StringRef Name;
7621 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7622 parseToken(AsmToken::EndOfStatement))
7623 return true;
7624
7625 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7626 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7628
7629 MCInst Inst;
7630 Inst.setOpcode(AArch64::TLSDESCCALL);
7631 Inst.addOperand(MCOperand::createExpr(Expr));
7632
7633 getParser().getStreamer().emitInstruction(Inst, getSTI());
7634 return false;
7635}
7636
7637/// ::= .loh <lohName | lohId> label1, ..., labelN
7638 /// The number of arguments depends on the LOH identifier.
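/// For example, a two-argument form such as: .loh AdrpAdrp Lbl1, Lbl2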
7639bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7640 MCLOHType Kind;
7641 if (getTok().isNot(AsmToken::Identifier)) {
7642 if (getTok().isNot(AsmToken::Integer))
7643 return TokError("expected an identifier or a number in directive");
7644 // We successfully got a numeric value for the identifier.
7645 // Check if it is valid.
7646 int64_t Id = getTok().getIntVal();
7647 if (Id <= -1U && !isValidMCLOHType(Id))
7648 return TokError("invalid numeric identifier in directive");
7649 Kind = (MCLOHType)Id;
7650 } else {
7651 StringRef Name = getTok().getIdentifier();
7652 // We successfully parsed an identifier.
7653 // Check if it is a recognized one.
7654 int Id = MCLOHNameToId(Name);
7655
7656 if (Id == -1)
7657 return TokError("invalid identifier in directive");
7658 Kind = (MCLOHType)Id;
7659 }
7660 // Consume the identifier.
7661 Lex();
7662 // Get the number of arguments of this LOH.
7663 int NbArgs = MCLOHIdToNbArgs(Kind);
7664
7665 assert(NbArgs != -1 && "Invalid number of arguments");
7666
7667 SmallVector<MCSymbol *, 3> Args;
7668 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7669 StringRef Name;
7670 if (getParser().parseIdentifier(Name))
7671 return TokError("expected identifier in directive");
7672 Args.push_back(getContext().getOrCreateSymbol(Name));
7673
7674 if (Idx + 1 == NbArgs)
7675 break;
7676 if (parseComma())
7677 return true;
7678 }
7679 if (parseEOL())
7680 return true;
7681
7682 getStreamer().emitLOHDirective(Kind, Args);
7683 return false;
7684}
7685
7686/// parseDirectiveLtorg
7687/// ::= .ltorg | .pool
7688bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7689 if (parseEOL())
7690 return true;
7691 getTargetStreamer().emitCurrentConstantPool();
7692 return false;
7693}
7694
7695/// parseDirectiveReq
7696/// ::= name .req registername
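/// For example: foo .req x4 makes 'foo' an alias for x4 until '.unreq foo'.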
7697bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7698 Lex(); // Eat the '.req' token.
7699 SMLoc SRegLoc = getLoc();
7700 RegKind RegisterKind = RegKind::Scalar;
7701 MCRegister RegNum;
7702 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7703
7704 if (!ParseRes.isSuccess()) {
7705 StringRef Kind;
7706 RegisterKind = RegKind::NeonVector;
7707 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7708
7709 if (ParseRes.isFailure())
7710 return true;
7711
7712 if (ParseRes.isSuccess() && !Kind.empty())
7713 return Error(SRegLoc, "vector register without type specifier expected");
7714 }
7715
7716 if (!ParseRes.isSuccess()) {
7717 StringRef Kind;
7718 RegisterKind = RegKind::SVEDataVector;
7719 ParseRes =
7720 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7721
7722 if (ParseRes.isFailure())
7723 return true;
7724
7725 if (ParseRes.isSuccess() && !Kind.empty())
7726 return Error(SRegLoc,
7727 "sve vector register without type specifier expected");
7728 }
7729
7730 if (!ParseRes.isSuccess()) {
7731 StringRef Kind;
7732 RegisterKind = RegKind::SVEPredicateVector;
7733 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7734
7735 if (ParseRes.isFailure())
7736 return true;
7737
7738 if (ParseRes.isSuccess() && !Kind.empty())
7739 return Error(SRegLoc,
7740 "sve predicate register without type specifier expected");
7741 }
7742
7743 if (!ParseRes.isSuccess())
7744 return Error(SRegLoc, "register name or alias expected");
7745
7746 // Shouldn't be anything else.
7747 if (parseEOL())
7748 return true;
7749
7750 auto pair = std::make_pair(RegisterKind, RegNum);
7751 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7752 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7753
7754 return false;
7755}
7756
7757 /// parseDirectiveUnreq
7758/// ::= .unreq registername
7759bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7760 if (getTok().isNot(AsmToken::Identifier))
7761 return TokError("unexpected input in .unreq directive.");
7762 RegisterReqs.erase(getTok().getIdentifier().lower());
7763 Lex(); // Eat the identifier.
7764 return parseToken(AsmToken::EndOfStatement);
7765}
7766
7767bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7768 if (parseEOL())
7769 return true;
7770 getStreamer().emitCFINegateRAState();
7771 return false;
7772}
7773
7774bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7775 if (parseEOL())
7776 return true;
7777 getStreamer().emitCFINegateRAStateWithPC();
7778 return false;
7779}
7780
7781/// parseDirectiveCFIBKeyFrame
7782 /// ::= .cfi_b_key_frame
7783bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7784 if (parseEOL())
7785 return true;
7786 getStreamer().emitCFIBKeyFrame();
7787 return false;
7788}
7789
7790/// parseDirectiveCFIMTETaggedFrame
7791/// ::= .cfi_mte_tagged_frame
7792bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7793 if (parseEOL())
7794 return true;
7795 getStreamer().emitCFIMTETaggedFrame();
7796 return false;
7797}
7798
7799/// parseDirectiveVariantPCS
7800/// ::= .variant_pcs symbolname
7801bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7802 StringRef Name;
7803 if (getParser().parseIdentifier(Name))
7804 return TokError("expected symbol name");
7805 if (parseEOL())
7806 return true;
7807 getTargetStreamer().emitDirectiveVariantPCS(
7808 getContext().getOrCreateSymbol(Name));
7809 return false;
7810}
7811
7812/// parseDirectiveSEHAllocStack
7813/// ::= .seh_stackalloc
7814bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7815 int64_t Size;
7816 if (parseImmExpr(Size))
7817 return true;
7818 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7819 return false;
7820}
7821
7822/// parseDirectiveSEHPrologEnd
7823/// ::= .seh_endprologue
7824bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7825 getTargetStreamer().emitARM64WinCFIPrologEnd();
7826 return false;
7827}
7828
7829/// parseDirectiveSEHSaveR19R20X
7830/// ::= .seh_save_r19r20_x
7831bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7832 int64_t Offset;
7833 if (parseImmExpr(Offset))
7834 return true;
7835 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7836 return false;
7837}
7838
7839/// parseDirectiveSEHSaveFPLR
7840/// ::= .seh_save_fplr
7841bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7842 int64_t Offset;
7843 if (parseImmExpr(Offset))
7844 return true;
7845 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7846 return false;
7847}
7848
7849/// parseDirectiveSEHSaveFPLRX
7850/// ::= .seh_save_fplr_x
7851bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7852 int64_t Offset;
7853 if (parseImmExpr(Offset))
7854 return true;
7855 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7856 return false;
7857}
7858
7859/// parseDirectiveSEHSaveReg
7860/// ::= .seh_save_reg
7861bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7862 unsigned Reg;
7863 int64_t Offset;
7864 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7865 parseComma() || parseImmExpr(Offset))
7866 return true;
7867 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7868 return false;
7869}
7870
7871/// parseDirectiveSEHSaveRegX
7872/// ::= .seh_save_reg_x
7873bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7874 unsigned Reg;
7875 int64_t Offset;
7876 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7877 parseComma() || parseImmExpr(Offset))
7878 return true;
7879 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7880 return false;
7881}
7882
7883/// parseDirectiveSEHSaveRegP
7884/// ::= .seh_save_regp
7885bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7886 unsigned Reg;
7887 int64_t Offset;
7888 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7889 parseComma() || parseImmExpr(Offset))
7890 return true;
7891 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7892 return false;
7893}
7894
7895/// parseDirectiveSEHSaveRegPX
7896/// ::= .seh_save_regp_x
7897bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7898 unsigned Reg;
7899 int64_t Offset;
7900 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7901 parseComma() || parseImmExpr(Offset))
7902 return true;
7903 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7904 return false;
7905}
7906
7907/// parseDirectiveSEHSaveLRPair
7908/// ::= .seh_save_lrpair
7909bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7910 unsigned Reg;
7911 int64_t Offset;
7912 L = getLoc();
7913 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7914 parseComma() || parseImmExpr(Offset))
7915 return true;
7916 if (check(((Reg - 19) % 2 != 0), L,
7917 "expected register with even offset from x19"))
7918 return true;
7919 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7920 return false;
7921}
7922
7923/// parseDirectiveSEHSaveFReg
7924/// ::= .seh_save_freg
7925bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7926 unsigned Reg;
7927 int64_t Offset;
7928 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7929 parseComma() || parseImmExpr(Offset))
7930 return true;
7931 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7932 return false;
7933}
7934
7935/// parseDirectiveSEHSaveFRegX
7936/// ::= .seh_save_freg_x
7937bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7938 unsigned Reg;
7939 int64_t Offset;
7940 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7941 parseComma() || parseImmExpr(Offset))
7942 return true;
7943 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7944 return false;
7945}
7946
7947/// parseDirectiveSEHSaveFRegP
7948/// ::= .seh_save_fregp
7949bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7950 unsigned Reg;
7951 int64_t Offset;
7952 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7953 parseComma() || parseImmExpr(Offset))
7954 return true;
7955 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7956 return false;
7957}
7958
7959/// parseDirectiveSEHSaveFRegPX
7960/// ::= .seh_save_fregp_x
7961bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7962 unsigned Reg;
7963 int64_t Offset;
7964 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7965 parseComma() || parseImmExpr(Offset))
7966 return true;
7967 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7968 return false;
7969}
7970
7971/// parseDirectiveSEHSetFP
7972/// ::= .seh_set_fp
7973bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7974 getTargetStreamer().emitARM64WinCFISetFP();
7975 return false;
7976}
7977
7978/// parseDirectiveSEHAddFP
7979/// ::= .seh_add_fp
7980bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7981 int64_t Size;
7982 if (parseImmExpr(Size))
7983 return true;
7984 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7985 return false;
7986}
7987
7988/// parseDirectiveSEHNop
7989/// ::= .seh_nop
7990bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7991 getTargetStreamer().emitARM64WinCFINop();
7992 return false;
7993}
7994
7995/// parseDirectiveSEHSaveNext
7996/// ::= .seh_save_next
7997bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7998 getTargetStreamer().emitARM64WinCFISaveNext();
7999 return false;
8000}
8001
8002/// parseDirectiveSEHEpilogStart
8003/// ::= .seh_startepilogue
8004bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
8005 getTargetStreamer().emitARM64WinCFIEpilogStart();
8006 return false;
8007}
8008
8009/// parseDirectiveSEHEpilogEnd
8010/// ::= .seh_endepilogue
8011bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
8012 getTargetStreamer().emitARM64WinCFIEpilogEnd();
8013 return false;
8014}
8015
8016/// parseDirectiveSEHTrapFrame
8017/// ::= .seh_trap_frame
8018bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
8019 getTargetStreamer().emitARM64WinCFITrapFrame();
8020 return false;
8021}
8022
8023/// parseDirectiveSEHMachineFrame
8024/// ::= .seh_pushframe
8025bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
8026 getTargetStreamer().emitARM64WinCFIMachineFrame();
8027 return false;
8028}
8029
8030/// parseDirectiveSEHContext
8031/// ::= .seh_context
8032bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
8033 getTargetStreamer().emitARM64WinCFIContext();
8034 return false;
8035}
8036
8037/// parseDirectiveSEHECContext
8038/// ::= .seh_ec_context
8039bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
8040 getTargetStreamer().emitARM64WinCFIECContext();
8041 return false;
8042}
8043
8044/// parseDirectiveSEHClearUnwoundToCall
8045/// ::= .seh_clear_unwound_to_call
8046bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
8047 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
8048 return false;
8049}
8050
8051/// parseDirectiveSEHPACSignLR
8052/// ::= .seh_pac_sign_lr
8053bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
8054 getTargetStreamer().emitARM64WinCFIPACSignLR();
8055 return false;
8056}
8057
8058/// parseDirectiveSEHSaveAnyReg
8059/// ::= .seh_save_any_reg
8060/// ::= .seh_save_any_reg_p
8061/// ::= .seh_save_any_reg_x
8062/// ::= .seh_save_any_reg_px
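/// For example: .seh_save_any_reg_px x19, 16 (the offset must be a multiple
/// of 16 for the paired and pre-indexed forms, and of 8 otherwise).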
8063bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
8064 bool Writeback) {
8065 MCRegister Reg;
8066 SMLoc Start, End;
8067 int64_t Offset;
8068 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
8069 parseComma() || parseImmExpr(Offset))
8070 return true;
8071
8072 if (Reg == AArch64::FP || Reg == AArch64::LR ||
8073 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
8074 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8075 return Error(L, "invalid save_any_reg offset");
8076 unsigned EncodedReg;
8077 if (Reg == AArch64::FP)
8078 EncodedReg = 29;
8079 else if (Reg == AArch64::LR)
8080 EncodedReg = 30;
8081 else
8082 EncodedReg = Reg - AArch64::X0;
8083 if (Paired) {
8084 if (Reg == AArch64::LR)
8085 return Error(Start, "lr cannot be paired with another register");
8086 if (Writeback)
8087 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
8088 else
8089 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
8090 } else {
8091 if (Writeback)
8092 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
8093 else
8094 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
8095 }
8096 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
8097 unsigned EncodedReg = Reg - AArch64::D0;
8098 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8099 return Error(L, "invalid save_any_reg offset");
8100 if (Paired) {
8101 if (Reg == AArch64::D31)
8102 return Error(Start, "d31 cannot be paired with another register");
8103 if (Writeback)
8104 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
8105 else
8106 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
8107 } else {
8108 if (Writeback)
8109 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
8110 else
8111 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
8112 }
8113 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
8114 unsigned EncodedReg = Reg - AArch64::Q0;
8115 if (Offset < 0 || Offset % 16)
8116 return Error(L, "invalid save_any_reg offset");
8117 if (Paired) {
8118 if (Reg == AArch64::Q31)
8119 return Error(Start, "q31 cannot be paired with another register");
8120 if (Writeback)
8121 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
8122 else
8123 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
8124 } else {
8125 if (Writeback)
8126 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
8127 else
8128 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
8129 }
8130 } else {
8131 return Error(Start, "save_any_reg register must be x, q or d register");
8132 }
8133 return false;
8134}
8135
8136/// parseDirectiveSEHAllocZ
8137/// ::= .seh_allocz
8138bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8139 int64_t Offset;
8140 if (parseImmExpr(Offset))
8141 return true;
8142 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8143 return false;
8144}
8145
8146/// parseDirectiveSEHSaveZReg
8147/// ::= .seh_save_zreg
8148bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8149 MCRegister RegNum;
8150 StringRef Kind;
8151 int64_t Offset;
8152 ParseStatus Res =
8153 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8154 if (!Res.isSuccess())
8155 return true;
8156 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
8157 "expected register in range z8 to z23"))
8158 return true;
8159 if (parseComma() || parseImmExpr(Offset))
8160 return true;
8161 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
8162 return false;
8163}
8164
8165/// parseDirectiveSEHSavePReg
8166/// ::= .seh_save_preg
8167bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8168 MCRegister RegNum;
8169 StringRef Kind;
8170 int64_t Offset;
8171 ParseStatus Res =
8172 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
8173 if (!Res.isSuccess())
8174 return true;
8175 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
8176 "expected register in range p4 to p15"))
8177 return true;
8178 if (parseComma() || parseImmExpr(Offset))
8179 return true;
8180 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
8181 return false;
8182}
8183
8184bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8185 // Handle parsing of .aeabi_subsection directives
8186 // - On first declaration of a subsection, expect exactly three identifiers
8187 // after `.aeabi_subsection`: the subsection name and two parameters.
8188 // - When switching to an existing subsection, it is valid to provide only
8189 // the subsection name, or the name together with the two parameters.
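 // Illustrative examples (not from this file) of the two accepted forms,
 // using subsection names and parameters referenced later in this parser:
 //   .aeabi_subsection aeabi_pauthabi, required, uleb128   // first declaration
 //   .aeabi_subsection aeabi_pauthabi                       // switch to it later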
8190 MCAsmParser &Parser = getParser();
8191
8192 // Consume the name (subsection name)
8193 StringRef SubsectionName;
8194 AArch64BuildAttributes::VendorID SubsectionNameID;
8195 if (Parser.getTok().is(AsmToken::Identifier)) {
8196 SubsectionName = Parser.getTok().getIdentifier();
8197 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
8198 } else {
8199 Error(Parser.getTok().getLoc(), "subsection name not found");
8200 return true;
8201 }
8202 Parser.Lex();
8203
8204 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8205 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
8206 // Check whether only the subsection name was provided.
8207 // If so, the user is trying to switch to a subsection that should have been
8208 // declared before.
8209 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
8210 if (SubsectionExists) {
8211 getTargetStreamer().emitAttributesSubsection(
8212 SubsectionName,
8213 AArch64BuildAttributes::SubsectionOptional(
8214 SubsectionExists->IsOptional),
8215 AArch64BuildAttributes::SubsectionType(
8216 SubsectionExists->ParameterType));
8217 return false;
8218 }
8219 // If the subsection does not exist, report an error.
8220 else {
8221 Error(Parser.getTok().getLoc(),
8222 "Could not switch to subsection '" + SubsectionName +
8223 "' using subsection name, subsection has not been defined");
8224 return true;
8225 }
8226 }
8227
8228 // Otherwise, two more parameters are expected: consume a comma.
8229 // parseComma() returns *false* on success and calls Lex(), so there is no
8230 // need to call Lex() again.
8231 if (Parser.parseComma()) {
8232 return true;
8233 }
8234
8235 // Consume the first parameter (optionality parameter)
8236 AArch64BuildAttributes::SubsectionOptional IsOptional;
8237 // options: optional/required
8238 if (Parser.getTok().is(AsmToken::Identifier)) {
8239 StringRef Optionality = Parser.getTok().getIdentifier();
8240 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
8241 if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
8242 Error(Parser.getTok().getLoc(),
8243 "unknown optionality parameter, expected required|optional: " + Optionality);
8244 return true;
8245 }
8246 if (SubsectionExists) {
8247 if (IsOptional != SubsectionExists->IsOptional) {
8248 Error(Parser.getTok().getLoc(),
8249 "optionality mismatch! subsection '" + SubsectionName +
8250 "' already exists with optionality defined as '" +
8252 SubsectionExists->IsOptional) +
8253 "' and not '" +
8254 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
8255 return true;
8256 }
8257 }
8258 } else {
8259 Error(Parser.getTok().getLoc(),
8260 "optionality parameter not found, expected required|optional");
8261 return true;
8262 }
8263 // Reject IsOptional values that are not accepted for known subsections
8264 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
8265 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
8266 Error(Parser.getTok().getLoc(),
8267 "aeabi_feature_and_bits must be marked as optional");
8268 return true;
8269 }
8270 }
8271 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8272 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8273 Error(Parser.getTok().getLoc(),
8274 "aeabi_pauthabi must be marked as required");
8275 return true;
8276 }
8277 }
8278 Parser.Lex();
8279 // consume a comma
8280 if (Parser.parseComma()) {
8281 return true;
8282 }
8283
8284 // Consume the second parameter (type parameter)
8285 AArch64BuildAttributes::SubsectionType Type;
8286 if (Parser.getTok().is(AsmToken::Identifier)) {
8287 StringRef Name = Parser.getTok().getIdentifier();
8288 Type = AArch64BuildAttributes::getTypeID(Name);
8289 if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
8290 Error(Parser.getTok().getLoc(),
8291 "unknown type parameter, expected uleb128|ntbs: " + Name);
8292 return true;
8293 }
8294 if (SubsectionExists) {
8295 if (Type != SubsectionExists->ParameterType) {
8296 Error(Parser.getTok().getLoc(),
8297 "type mismatch! subsection '" + SubsectionName +
8298 "' already exists with type defined as '" +
8300 SubsectionExists->ParameterType) +
8301 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8302 "'");
8303 return true;
8304 }
8305 }
8306 } else {
8307 Error(Parser.getTok().getLoc(),
8308 "type parameter not found, expected uleb128|ntbs");
8309 return true;
8310 }
8311 // Reject 'type' values that are not accepted for known subsections
8312 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8313 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8314 if (AArch64BuildAttributes::NTBS == Type) {
8315 Error(Parser.getTok().getLoc(),
8316 SubsectionName + " must be marked as ULEB128");
8317 return true;
8318 }
8319 }
8320 Parser.Lex();
8321
8322 // Parsing finished, check for trailing tokens.
8323 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8324 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
8325 "attributes subsection header directive");
8326 return true;
8327 }
8328
8329 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
8330
8331 return false;
8332}
8333
8334bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8335 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8336 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8337 // separated by a comma.
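 // For instance (illustrative, not from this file), inside an active
 // aeabi_feature_and_bits subsection the value must be 0 or 1:
 //   .aeabi_attribute Tag_Feature_BTI, 1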
8338 MCAsmParser &Parser = getParser();
8339
8340 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8341 getTargetStreamer().getActiveAttributesSubsection();
8342 if (nullptr == ActiveSubsection) {
8343 Error(Parser.getTok().getLoc(),
8344 "no active subsection, build attribute can not be added");
8345 return true;
8346 }
8347 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8348 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8349
8350 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8351 if (AArch64BuildAttributes::getVendorName(
8352 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8353 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8354 if (AArch64BuildAttributes::getVendorName(
8355 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8356 ActiveSubsectionName)
8357 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8358
8359 StringRef TagStr = "";
8360 unsigned Tag;
8361 if (Parser.getTok().is(AsmToken::Integer)) {
8362 Tag = getTok().getIntVal();
8363 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8364 TagStr = Parser.getTok().getIdentifier();
8365 switch (ActiveSubsectionID) {
8366 default:
8367 // Tag was provided as an unrecognized string instead of an unsigned
8368 // integer
8369 Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
8370 "' \nExcept for public subsections, "
8371 "tags have to be an unsigned int.");
8372 return true;
8373 break;
8374 case AArch64BuildAttributes::AEABI_PAUTHABI:
8375 Tag = AArch64BuildAttributes::getPauthABITagsID(TagStr);
8376 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8377 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8378 TagStr + "' for subsection '" +
8379 ActiveSubsectionName + "'");
8380 return true;
8381 }
8382 break;
8383 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8384 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(TagStr);
8385 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8386 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8387 TagStr + "' for subsection '" +
8388 ActiveSubsectionName + "'");
8389 return true;
8390 }
8391 break;
8392 }
8393 } else {
8394 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8395 return true;
8396 }
8397 Parser.Lex();
8398 // consume a comma
8399 // parseComma() returns *false* on success and calls Lex(), so there is no
8400 // need to call Lex() again.
8401 if (Parser.parseComma()) {
8402 return true;
8403 }
8404
8405 // Consume the second parameter (attribute value)
8406 unsigned ValueInt = unsigned(-1);
8407 std::string ValueStr = "";
8408 if (Parser.getTok().is(AsmToken::Integer)) {
8409 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8410 Error(
8411 Parser.getTok().getLoc(),
8412 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8413 return true;
8414 }
8415 ValueInt = getTok().getIntVal();
8416 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8417 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8418 Error(
8419 Parser.getTok().getLoc(),
8420 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8421 return true;
8422 }
8423 ValueStr = Parser.getTok().getIdentifier();
8424 } else if (Parser.getTok().is(AsmToken::String)) {
8425 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8426 Error(
8427 Parser.getTok().getLoc(),
8428 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8429 return true;
8430 }
8431 ValueStr = Parser.getTok().getString();
8432 } else {
8433 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8434 return true;
8435 }
8436 // Reject values that are not accepted for known tags
8437 // (AEABI_FEATURE_AND_BITS)
8438 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8439 if (0 != ValueInt && 1 != ValueInt) {
8440 Error(Parser.getTok().getLoc(),
8441 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8442 "' options are 0|1");
8443 return true;
8444 }
8445 }
8446 Parser.Lex();
8447
8448 // Parsing finished. Check for trailing tokens.
8449 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8450 Error(Parser.getTok().getLoc(),
8451 "unexpected token for AArch64 build attributes tag and value "
8452 "attribute directive");
8453 return true;
8454 }
8455
8456 if (unsigned(-1) != ValueInt) {
8457 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
8458 }
8459 if ("" != ValueStr) {
8460 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8461 ValueStr);
8462 }
8463 return false;
8464}
8465
8466bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8467 SMLoc EndLoc;
8468
8469 if (getParser().parseExpression(Res))
8470 return true;
8471 MCAsmParser &Parser = getParser();
8472 if (!parseOptionalToken(AsmToken::At))
8473 return false;
8474 if (getLexer().getKind() != AsmToken::Identifier)
8475 return Error(getLoc(), "expected relocation specifier");
8476
8477 std::string Identifier = Parser.getTok().getIdentifier().lower();
8478 SMLoc Loc = getLoc();
8479 Lex();
8480 if (Identifier == "auth")
8481 return parseAuthExpr(Res, EndLoc);
8482
8483 auto Spec = AArch64::S_None;
8484 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8485 if (Identifier == "got")
8486 Spec = AArch64::S_MACHO_GOT;
8487 } else {
8488 // Unofficial, experimental syntax that will be changed.
8489 if (Identifier == "gotpcrel")
8490 Spec = AArch64::S_GOTPCREL;
8491 else if (Identifier == "plt")
8492 Spec = AArch64::S_PLT;
8493 else if (Identifier == "funcinit")
8494 Spec = AArch64::S_FUNCINIT;
8495 }
8496 if (Spec == AArch64::S_None)
8497 return Error(Loc, "invalid relocation specifier");
8498 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
8499 Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
8500 SRE->getLoc());
8501 else
8502 return Error(Loc, "@ specifier only allowed after a symbol");
8503
8504 for (;;) {
8505 std::optional<MCBinaryExpr::Opcode> Opcode;
8506 if (parseOptionalToken(AsmToken::Plus))
8507 Opcode = MCBinaryExpr::Add;
8508 else if (parseOptionalToken(AsmToken::Minus))
8509 Opcode = MCBinaryExpr::Sub;
8510 else
8511 break;
8512 const MCExpr *Term;
8513 if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
8514 return true;
8515 Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
8516 }
8517 return false;
8518}
8519
8520/// parseAuthExpr
8521/// ::= _sym@AUTH(ib,123[,addr])
8522/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8523/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8524bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8525 MCAsmParser &Parser = getParser();
8526 MCContext &Ctx = getContext();
8527 AsmToken Tok = Parser.getTok();
8528
8529 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8530 if (parseToken(AsmToken::LParen, "expected '('"))
8531 return true;
8532
8533 if (Parser.getTok().isNot(AsmToken::Identifier))
8534 return TokError("expected key name");
8535
8536 StringRef KeyStr = Parser.getTok().getIdentifier();
8537 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8538 if (!KeyIDOrNone)
8539 return TokError("invalid key '" + KeyStr + "'");
8540 Parser.Lex();
8541
8542 if (parseToken(AsmToken::Comma, "expected ','"))
8543 return true;
8544
8545 if (Parser.getTok().isNot(AsmToken::Integer))
8546 return TokError("expected integer discriminator");
8547 int64_t Discriminator = Parser.getTok().getIntVal();
8548
8549 if (!isUInt<16>(Discriminator))
8550 return TokError("integer discriminator " + Twine(Discriminator) +
8551 " out of range [0, 0xFFFF]");
8552 Parser.Lex();
8553
8554 bool UseAddressDiversity = false;
8555 if (Parser.getTok().is(AsmToken::Comma)) {
8556 Parser.Lex();
8557 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8558 Parser.getTok().getIdentifier() != "addr")
8559 return TokError("expected 'addr'");
8560 UseAddressDiversity = true;
8561 Parser.Lex();
8562 }
8563
8564 EndLoc = Parser.getTok().getEndLoc();
8565 if (parseToken(AsmToken::RParen, "expected ')'"))
8566 return true;
8567
8568 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8569 UseAddressDiversity, Ctx, Res->getLoc());
8570 return false;
8571}
8572
8573bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8574 AArch64::Specifier &ELFSpec,
8575 AArch64::Specifier &DarwinSpec,
8576 int64_t &Addend) {
8577 ELFSpec = AArch64::S_INVALID;
8578 DarwinSpec = AArch64::S_None;
8579 Addend = 0;
8580
8581 if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
8582 ELFSpec = AE->getSpecifier();
8583 Expr = AE->getSubExpr();
8584 }
8585
8586 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8587 if (SE) {
8588 // It's a simple symbol reference with no addend.
8589 DarwinSpec = AArch64::Specifier(SE->getKind());
8590 return true;
8591 }
8592
8593 // Check that it looks like a symbol + an addend
8594 MCValue Res;
8595 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
8596 if (!Relocatable || Res.getSubSym())
8597 return false;
8598
8599 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8600 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8601 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8602 return false;
8603
8604 if (Res.getAddSym())
8605 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8606 Addend = Res.getConstant();
8607
8608 // It's some symbol reference + a constant addend, but really
8609 // shouldn't use both Darwin and ELF syntax.
8610 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8611}
8612
8613/// Force static initialization.
8614extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8622
8623#define GET_REGISTER_MATCHER
8624#define GET_SUBTARGET_FEATURE_NAME
8625#define GET_MATCHER_IMPLEMENTATION
8626#define GET_MNEMONIC_SPELL_CHECKER
8627#include "AArch64GenAsmMatcher.inc"
8628
8629// Define this matcher function after the auto-generated include so we
8630// have the match class enum definitions.
8631unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8632 unsigned Kind) {
8633 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8634
8635 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8636 if (!Op.isImm())
8637 return Match_InvalidOperand;
8638 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8639 if (!CE)
8640 return Match_InvalidOperand;
8641 if (CE->getValue() == ExpectedVal)
8642 return Match_Success;
8643 return Match_InvalidOperand;
8644 };
8645
8646 switch (Kind) {
8647 default:
8648 return Match_InvalidOperand;
8649 case MCK_MPR:
8650 // If the Kind is a token for the MPR register class which has the "za"
8651 // register (SME accumulator array), check if the asm is a literal "za"
8652 // token. This is for the "smstart za" alias that defines the register
8653 // as a literal token.
8654 if (Op.isTokenEqual("za"))
8655 return Match_Success;
8656 return Match_InvalidOperand;
8657
8658 // If the kind is a token for a literal immediate, check if our asm operand
8659 // matches. This is for InstAliases which have a fixed-value immediate in
8660 // the asm string, such as hints which are parsed into a specific
8661 // instruction definition.
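// For example (illustrative), an InstAlias whose asm string contains the
// literal "#16" matches only when the parsed operand is a constant expression
// equal to 16, which is what MatchesOpImmediate(16) checks.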
8662#define MATCH_HASH(N) \
8663 case MCK__HASH_##N: \
8664 return MatchesOpImmediate(N);
8665 MATCH_HASH(0)
8666 MATCH_HASH(1)
8667 MATCH_HASH(2)
8668 MATCH_HASH(3)
8669 MATCH_HASH(4)
8670 MATCH_HASH(6)
8671 MATCH_HASH(7)
8672 MATCH_HASH(8)
8673 MATCH_HASH(10)
8674 MATCH_HASH(12)
8675 MATCH_HASH(14)
8676 MATCH_HASH(16)
8677 MATCH_HASH(24)
8678 MATCH_HASH(25)
8679 MATCH_HASH(26)
8680 MATCH_HASH(27)
8681 MATCH_HASH(28)
8682 MATCH_HASH(29)
8683 MATCH_HASH(30)
8684 MATCH_HASH(31)
8685 MATCH_HASH(32)
8686 MATCH_HASH(40)
8687 MATCH_HASH(48)
8688 MATCH_HASH(64)
8689#undef MATCH_HASH
8690#define MATCH_HASH_MINUS(N) \
8691 case MCK__HASH__MINUS_##N: \
8692 return MatchesOpImmediate(-N);
8693 MATCH_HASH_MINUS(4)
8694 MATCH_HASH_MINUS(8)
8695 MATCH_HASH_MINUS(16)
8696#undef MATCH_HASH_MINUS
8697 }
8698}
8699
8700ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8701
8702 SMLoc S = getLoc();
8703
8704 if (getTok().isNot(AsmToken::Identifier))
8705 return Error(S, "expected register");
8706
8707 MCRegister FirstReg;
8708 ParseStatus Res = tryParseScalarRegister(FirstReg);
8709 if (!Res.isSuccess())
8710 return Error(S, "expected first even register of a consecutive same-size "
8711 "even/odd register pair");
8712
8713 const MCRegisterClass &WRegClass =
8714 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8715 const MCRegisterClass &XRegClass =
8716 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8717
8718 bool isXReg = XRegClass.contains(FirstReg),
8719 isWReg = WRegClass.contains(FirstReg);
8720 if (!isXReg && !isWReg)
8721 return Error(S, "expected first even register of a consecutive same-size "
8722 "even/odd register pair");
8723
8724 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8725 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8726
8727 if (FirstEncoding & 0x1)
8728 return Error(S, "expected first even register of a consecutive same-size "
8729 "even/odd register pair");
8730
8731 if (getTok().isNot(AsmToken::Comma))
8732 return Error(getLoc(), "expected comma");
8733 // Eat the comma
8734 Lex();
8735
8736 SMLoc E = getLoc();
8737 MCRegister SecondReg;
8738 Res = tryParseScalarRegister(SecondReg);
8739 if (!Res.isSuccess())
8740 return Error(E, "expected second odd register of a consecutive same-size "
8741 "even/odd register pair");
8742
8743 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8744 (isXReg && !XRegClass.contains(SecondReg)) ||
8745 (isWReg && !WRegClass.contains(SecondReg)))
8746 return Error(E, "expected second odd register of a consecutive same-size "
8747 "even/odd register pair");
8748
8749 MCRegister Pair;
8750 if (isXReg) {
8751 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8752 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8753 } else {
8754 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8755 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8756 }
8757
8758 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8759 getLoc(), getContext()));
8760
8761 return ParseStatus::Success;
8762}
8763
8764template <bool ParseShiftExtend, bool ParseSuffix>
8765ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8766 const SMLoc S = getLoc();
8767 // Check for a SVE vector register specifier first.
8768 MCRegister RegNum;
8769 StringRef Kind;
8770
8771 ParseStatus Res =
8772 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8773
8774 if (!Res.isSuccess())
8775 return Res;
8776
8777 if (ParseSuffix && Kind.empty())
8778 return ParseStatus::NoMatch;
8779
8780 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8781 if (!KindRes)
8782 return ParseStatus::NoMatch;
8783
8784 unsigned ElementWidth = KindRes->second;
8785
8786 // No shift/extend is the default.
8787 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8788 Operands.push_back(AArch64Operand::CreateVectorReg(
8789 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8790
8791 ParseStatus Res = tryParseVectorIndex(Operands);
8792 if (Res.isFailure())
8793 return ParseStatus::Failure;
8794 return ParseStatus::Success;
8795 }
8796
8797 // Eat the comma
8798 Lex();
8799
8800 // Match the shift
8801 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8802 Res = tryParseOptionalShiftExtend(ExtOpnd);
8803 if (!Res.isSuccess())
8804 return Res;
8805
8806 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8807 Operands.push_back(AArch64Operand::CreateVectorReg(
8808 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8809 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8810 Ext->hasShiftExtendAmount()));
8811
8812 return ParseStatus::Success;
8813}
8814
8815ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8816 MCAsmParser &Parser = getParser();
8817
8818 SMLoc SS = getLoc();
8819 const AsmToken &TokE = getTok();
8820 bool IsHash = TokE.is(AsmToken::Hash);
8821
8822 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8823 return ParseStatus::NoMatch;
8824
8825 int64_t Pattern;
8826 if (IsHash) {
8827 Lex(); // Eat hash
8828
8829 // Parse the immediate operand.
8830 const MCExpr *ImmVal;
8831 SS = getLoc();
8832 if (Parser.parseExpression(ImmVal))
8833 return ParseStatus::Failure;
8834
8835 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8836 if (!MCE)
8837 return TokError("invalid operand for instruction");
8838
8839 Pattern = MCE->getValue();
8840 } else {
8841 // Parse the pattern
8842 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8843 if (!Pat)
8844 return ParseStatus::NoMatch;
8845
8846 Lex();
8847 Pattern = Pat->Encoding;
8848 assert(Pattern >= 0 && Pattern < 32);
8849 }
8850
8851 Operands.push_back(
8852 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8853 SS, getLoc(), getContext()));
8854
8855 return ParseStatus::Success;
8856}
8857
8858ParseStatus
8859AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8860 int64_t Pattern;
8861 SMLoc SS = getLoc();
8862 const AsmToken &TokE = getTok();
8863 // Parse the pattern
8864 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8865 TokE.getString());
8866 if (!Pat)
8867 return ParseStatus::NoMatch;
8868
8869 Lex();
8870 Pattern = Pat->Encoding;
8871 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8872
8873 Operands.push_back(
8874 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8875 SS, getLoc(), getContext()));
8876
8877 return ParseStatus::Success;
8878}
8879
8880ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8881 SMLoc SS = getLoc();
8882
8883 MCRegister XReg;
8884 if (!tryParseScalarRegister(XReg).isSuccess())
8885 return ParseStatus::NoMatch;
8886
8887 MCContext &ctx = getContext();
8888 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8889 MCRegister X8Reg = RI->getMatchingSuperReg(
8890 XReg, AArch64::x8sub_0,
8891 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8892 if (!X8Reg)
8893 return Error(SS,
8894 "expected an even-numbered x-register in the range [x0,x22]");
8895
8896 Operands.push_back(
8897 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8898 return ParseStatus::Success;
8899}
8900
8901ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8902 SMLoc S = getLoc();
8903
8904 if (getTok().isNot(AsmToken::Integer))
8905 return ParseStatus::NoMatch;
8906
8907 if (getLexer().peekTok().isNot(AsmToken::Colon))
8908 return ParseStatus::NoMatch;
8909
8910 const MCExpr *ImmF;
8911 if (getParser().parseExpression(ImmF))
8912 return ParseStatus::NoMatch;
8913
8914 if (getTok().isNot(AsmToken::Colon))
8915 return ParseStatus::NoMatch;
8916
8917 Lex(); // Eat ':'
8918 if (getTok().isNot(AsmToken::Integer))
8919 return ParseStatus::NoMatch;
8920
8921 SMLoc E = getTok().getLoc();
8922 const MCExpr *ImmL;
8923 if (getParser().parseExpression(ImmL))
8924 return ParseStatus::NoMatch;
8925
8926 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8927 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8928
8929 Operands.push_back(
8930 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8931 return ParseStatus::Success;
8932}
8933
8934template <int Adj>
8935ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8936 SMLoc S = getLoc();
8937
8938 parseOptionalToken(AsmToken::Hash);
8939 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8940
8941 if (getTok().isNot(AsmToken::Integer))
8942 return ParseStatus::NoMatch;
8943
8944 const MCExpr *Ex;
8945 if (getParser().parseExpression(Ex))
8946 return ParseStatus::NoMatch;
8947
8948 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8949 if (IsNegative)
8950 Imm = -Imm;
8951
8952 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8953 // return a value that is certain to trigger an error message about an
8954 // invalid immediate range rather than a non-descriptive invalid operand error.
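 // Worked example (illustrative): with Adj == -1, a source operand of "#1"
 // gives Imm == 1 and is adjusted to 0, while "#0" would adjust to -1 and is
 // therefore replaced by the sentinel -2 and rejected with a range error.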
8955 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8956 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8957 Imm = -2;
8958 else
8959 Imm += Adj;
8960
8961 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8962 Operands.push_back(AArch64Operand::CreateImm(
8963 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8964
8965 return ParseStatus::Success;
8966}