LLVM 20.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
// The class of register operand the parser recognized; used to pick the
// matching register-name table and to validate how the register is used.
enum class RegKind {
  Scalar,                // General-purpose or scalar FP/SIMD register.
  NeonVector,            // NEON vector register (with optional type suffix).
  SVEDataVector,         // SVE data vector register (Z).
  SVEPredicateAsCounter, // SVE predicate register used in counter form.
  SVEPredicateVector,    // SVE predicate register (P).
  Matrix,                // SME matrix (ZA) register/tile.
  LookupTable            // SME lookup-table register (see tryParseZTOperand).
};
76
// Shape of an SME matrix operand: the whole array, a tile, or a row/column
// slice of a tile.
enum class MatrixKind { Array, Tile, Row, Col };
78
// How a parsed register must relate to the register the instruction encodes:
// the same register, its super-register, or its sub-register (used by
// RegOp::EqualityTy, e.g. for GPR64as32 / GPR32as64 operands).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registers via the .req directive.
91
  // Records a pending MOVPRFX instruction so that the instruction following
  // it can be validated against the movprfx pairing rules (matching
  // destination, and — for the predicated forms — matching predicate and
  // element size).
  class PrefixInfo {
  public:
    // Builds a PrefixInfo from Inst. The result is only "active" when Inst
    // is one of the MOVPRFX opcodes; for any other opcode a default
    // (inactive) PrefixInfo is returned.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated movprfx: only the destination is constrained.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated movprfx: predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated movprfx: predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // ElementSize and Pg are only meaningful for the predicated forms.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize; // Valid only when Predicated.
    unsigned Dst;         // Valid only when Active.
    unsigned Pg;          // Valid only when Predicated.
  } NextPrefix;
152
153 AArch64TargetStreamer &getTargetStreamer() {
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFIBKeyFrame();
199 bool parseDirectiveCFIMTETaggedFrame();
200
201 bool parseDirectiveVariantPCS(SMLoc L);
202
203 bool parseDirectiveSEHAllocStack(SMLoc L);
204 bool parseDirectiveSEHPrologEnd(SMLoc L);
205 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
206 bool parseDirectiveSEHSaveFPLR(SMLoc L);
207 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
208 bool parseDirectiveSEHSaveReg(SMLoc L);
209 bool parseDirectiveSEHSaveRegX(SMLoc L);
210 bool parseDirectiveSEHSaveRegP(SMLoc L);
211 bool parseDirectiveSEHSaveRegPX(SMLoc L);
212 bool parseDirectiveSEHSaveLRPair(SMLoc L);
213 bool parseDirectiveSEHSaveFReg(SMLoc L);
214 bool parseDirectiveSEHSaveFRegX(SMLoc L);
215 bool parseDirectiveSEHSaveFRegP(SMLoc L);
216 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
217 bool parseDirectiveSEHSetFP(SMLoc L);
218 bool parseDirectiveSEHAddFP(SMLoc L);
219 bool parseDirectiveSEHNop(SMLoc L);
220 bool parseDirectiveSEHSaveNext(SMLoc L);
221 bool parseDirectiveSEHEpilogStart(SMLoc L);
222 bool parseDirectiveSEHEpilogEnd(SMLoc L);
223 bool parseDirectiveSEHTrapFrame(SMLoc L);
224 bool parseDirectiveSEHMachineFrame(SMLoc L);
225 bool parseDirectiveSEHContext(SMLoc L);
226 bool parseDirectiveSEHECContext(SMLoc L);
227 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
228 bool parseDirectiveSEHPACSignLR(SMLoc L);
229 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
230
231 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
233 unsigned getNumRegsForRegKind(RegKind K);
234 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
237 bool MatchingInlineAsm) override;
238/// @name Auto-generated Match Functions
239/// {
240
241#define GET_ASSEMBLER_HEADER
242#include "AArch64GenAsmMatcher.inc"
243
244 /// }
245
246 ParseStatus tryParseScalarRegister(MCRegister &Reg);
247 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
248 RegKind MatchKind);
249 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
250 ParseStatus tryParseSVCR(OperandVector &Operands);
251 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
252 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
253 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
254 ParseStatus tryParseSysReg(OperandVector &Operands);
255 ParseStatus tryParseSysCROperand(OperandVector &Operands);
256 template <bool IsSVEPrefetch = false>
257 ParseStatus tryParsePrefetch(OperandVector &Operands);
258 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
259 ParseStatus tryParsePSBHint(OperandVector &Operands);
260 ParseStatus tryParseBTIHint(OperandVector &Operands);
261 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
262 ParseStatus tryParseAdrLabel(OperandVector &Operands);
263 template <bool AddFPZeroAsLiteral>
264 ParseStatus tryParseFPImm(OperandVector &Operands);
265 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
266 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
267 bool tryParseNeonVectorRegister(OperandVector &Operands);
268 ParseStatus tryParseVectorIndex(OperandVector &Operands);
269 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
270 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
271 template <bool ParseShiftExtend,
272 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
273 ParseStatus tryParseGPROperand(OperandVector &Operands);
274 ParseStatus tryParseZTOperand(OperandVector &Operands);
275 template <bool ParseShiftExtend, bool ParseSuffix>
276 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
277 template <RegKind RK>
278 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
280 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
281 template <RegKind VectorKind>
282 ParseStatus tryParseVectorList(OperandVector &Operands,
283 bool ExpectMatch = false);
284 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
285 ParseStatus tryParseSVEPattern(OperandVector &Operands);
286 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
287 ParseStatus tryParseGPR64x8(OperandVector &Operands);
288 ParseStatus tryParseImmRange(OperandVector &Operands);
289
290public:
291 enum AArch64MatchResultTy {
292 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
293#define GET_OPERAND_DIAGNOSTIC_TYPES
294#include "AArch64GenAsmMatcher.inc"
295 };
296 bool IsILP32;
297 bool IsWindowsArm64EC;
298
299 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
300 const MCInstrInfo &MII, const MCTargetOptions &Options)
301 : MCTargetAsmParser(Options, STI, MII) {
303 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
306 if (S.getTargetStreamer() == nullptr)
308
309 // Alias .hword/.word/.[dx]word to the target-independent
310 // .2byte/.4byte/.8byte directives as they have the same form and
311 // semantics:
312 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
313 Parser.addAliasForDirective(".hword", ".2byte");
314 Parser.addAliasForDirective(".word", ".4byte");
315 Parser.addAliasForDirective(".dword", ".8byte");
316 Parser.addAliasForDirective(".xword", ".8byte");
317
318 // Initialize the set of available features.
319 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
320 }
321
322 bool areEqualRegs(const MCParsedAsmOperand &Op1,
323 const MCParsedAsmOperand &Op2) const override;
325 SMLoc NameLoc, OperandVector &Operands) override;
326 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
328 SMLoc &EndLoc) override;
329 bool ParseDirective(AsmToken DirectiveID) override;
331 unsigned Kind) override;
332
333 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
334
335 static bool classifySymbolRef(const MCExpr *Expr,
336 AArch64MCExpr::VariantKind &ELFRefKind,
337 MCSymbolRefExpr::VariantKind &DarwinRefKind,
338 int64_t &Addend);
339};
340
341/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
342/// instruction.
343class AArch64Operand : public MCParsedAsmOperand {
344private:
  // Discriminator for the anonymous payload union below; exactly one union
  // member is valid at a time, selected by Kind.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range of this operand, for diagnostics.
  SMLoc StartLoc, EndLoc;
368
  // A literal token operand; the spelling is stored as pointer + length.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };
374
375 // Separate shift/extend operand.
376 struct ShiftExtendOp {
378 unsigned Amount;
379 bool HasExplicitAmount;
380 };
381
  // A register operand, optionally carrying an embedded shift/extend.
  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
407
  // An SME matrix register (array, tile, or tile slice — see MatrixKind).
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // A list of SME matrix tiles, represented as a register bitmask.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // A vector register list (count/stride/element info from the parsed form).
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  // A lane index applied to a vector register.
  struct VectorIndexOp {
    int Val;
  };

  // A plain immediate expression (may be symbolic, not just a constant).
  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate together with an explicit shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // An inclusive immediate range [First, Last].
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };
444
445 struct CondCodeOp {
447 };
448
  // A floating-point immediate.
  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  // A barrier operand; the spelling is kept for diagnostics/printing.
  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // A system register, with its encodings for the contexts it is valid in.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // An immediate naming a system control register (Cn).
  struct SysCRImmOp {
    unsigned Val;
  };

  // A prefetch-operation operand; spelling kept for diagnostics.
  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // A PSB hint operand; spelling kept for diagnostics.
  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // A BTI hint operand; spelling kept for diagnostics.
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // An SVCR (streaming-mode control) operand; spelling kept for diagnostics.
  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
496
  // Payload storage: exactly one member is valid, selected by Kind above.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
525
  // Copy constructor. The payload lives in a union, so only the member
  // selected by Kind may be read; copy exactly that one member.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
590
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Accessors for the union payload; each asserts the operand kind matches.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }
630
632 assert(Kind == k_CondCode && "Invalid access!");
633 return CondCode.Code;
634 }
635
  // Reconstitute the FP immediate from its bit pattern (stored as uint64_t).
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  // Whether the textual FP literal converted exactly to the stored value.
  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }
690
  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors are valid both for a standalone shift/extend
  // operand and for a register operand that carries an embedded shift/extend.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  // Whether an explicit amount was written in the assembly (vs. the default).
  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  // Never a generic MC memory operand; addressing modes are modeled as
  // register + immediate operands instead.
  bool isMem() const override { return false; }
782
783 bool isUImm6() const {
784 if (!isImm())
785 return false;
786 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
787 if (!MCE)
788 return false;
789 int64_t Val = MCE->getValue();
790 return (Val >= 0 && Val < 64);
791 }
792
  // An unscaled Width-bit signed immediate.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // A Bits-bit signed immediate that must be a multiple of Scale; see
  // isImmScaled for the exact bounds.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // An unsigned scaled immediate, or — when IsRange — an immediate range
  // whose extent must be exactly Offset (i.e. Last == First + Offset).
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
807
  // Shared range check for scaled immediates. Returns NoMatch when the
  // operand's form doesn't fit the template (so other matchers can still
  // apply), Match when the value is in range and a multiple of Scale, and
  // NearMatch otherwise (drives an out-of-range diagnostic). For ranges,
  // only the first value is checked here; the extent is checked by
  // isUImmScaled.
  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      // Signed: [-2^(Bits-1), 2^(Bits-1) - 1], scaled by Scale.
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      // Unsigned: [0, 2^Bits - 1], scaled by Scale.
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
839
840 DiagnosticPredicate isSVEPattern() const {
841 if (!isImm())
842 return DiagnosticPredicateTy::NoMatch;
843 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
844 if (!MCE)
845 return DiagnosticPredicateTy::NoMatch;
846 int64_t Val = MCE->getValue();
847 if (Val >= 0 && Val < 32)
848 return DiagnosticPredicateTy::Match;
849 return DiagnosticPredicateTy::NearMatch;
850 }
851
852 DiagnosticPredicate isSVEVecLenSpecifier() const {
853 if (!isImm())
854 return DiagnosticPredicateTy::NoMatch;
855 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
856 if (!MCE)
857 return DiagnosticPredicateTy::NoMatch;
858 int64_t Val = MCE->getValue();
859 if (Val >= 0 && Val <= 1)
860 return DiagnosticPredicateTy::Match;
861 return DiagnosticPredicateTy::NearMatch;
862 }
863
864 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
866 MCSymbolRefExpr::VariantKind DarwinRefKind;
867 int64_t Addend;
868 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
869 Addend)) {
870 // If we don't understand the expression, assume the best and
871 // let the fixup and relocation code deal with it.
872 return true;
873 }
874
875 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
876 ELFRefKind == AArch64MCExpr::VK_LO12 ||
877 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
878 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
879 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
880 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
881 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
883 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
884 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
885 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
886 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
887 // Note that we don't range-check the addend. It's adjusted modulo page
888 // size when converted, so there is no "out of range" condition when using
889 // @pageoff.
890 return true;
891 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
892 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
893 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
894 return Addend == 0;
895 }
896
897 return false;
898 }
899
900 template <int Scale> bool isUImm12Offset() const {
901 if (!isImm())
902 return false;
903
904 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
905 if (!MCE)
906 return isSymbolicUImm12Offset(getImm());
907
908 int64_t Val = MCE->getValue();
909 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
910 }
911
912 template <int N, int M>
913 bool isImmInRange() const {
914 if (!isImm())
915 return false;
916 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
917 if (!MCE)
918 return false;
919 int64_t Val = MCE->getValue();
920 return (Val >= N && Val <= M);
921 }
922
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Upper is a mask of the bits above T's width. Avoid left shift by 64
    // directly (UB when sizeof(T) == 8) by shifting in two halves.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the truncated value against the logical-immediate encoding for
    // T's bit width.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
942
  // Operand-kind predicates for the shifted-immediate and range forms.
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }
946
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // A non-zero constant whose low Width bits are clear can be
        // represented in shifted form; everything else (including zero) is
        // reported unshifted.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
967
968 bool isAddSubImm() const {
969 if (!isShiftedImm() && !isImm())
970 return false;
971
972 const MCExpr *Expr;
973
974 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
975 if (isShiftedImm()) {
976 unsigned Shift = ShiftedImm.ShiftAmount;
977 Expr = ShiftedImm.Val;
978 if (Shift != 0 && Shift != 12)
979 return false;
980 } else {
981 Expr = getImm();
982 }
983
985 MCSymbolRefExpr::VariantKind DarwinRefKind;
986 int64_t Addend;
987 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
988 DarwinRefKind, Addend)) {
989 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
990 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
991 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
992 || ELFRefKind == AArch64MCExpr::VK_LO12
993 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
994 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
995 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
996 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
997 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
998 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
999 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
1000 || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
1001 || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1002 }
1003
1004 // If it's a constant, it should be a real immediate in range.
1005 if (auto ShiftedVal = getShiftedVal<12>())
1006 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1007
1008 // If it's an expression, we hope for the best and let the fixup/relocation
1009 // code deal with it.
1010 return true;
1011 }
1012
1013 bool isAddSubImmNeg() const {
1014 if (!isShiftedImm() && !isImm())
1015 return false;
1016
1017 // Otherwise it should be a real negative immediate in range.
1018 if (auto ShiftedVal = getShiftedVal<12>())
1019 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1020
1021 return false;
1022 }
1023
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements have no room for the 'lsl #8' shifted form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1044
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements have no room for the 'lsl #8' shifted form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1062
  // Matches a logical immediate that is NOT also expressible as an SVE CPY
  // immediate, so the matcher only uses the logical form when CPY can't.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1068
  // True when this operand is a parsed condition code.
  bool isCondCode() const { return Kind == k_CondCode; }
1070
1071 bool isSIMDImmType10() const {
1072 if (!isImm())
1073 return false;
1074 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1075 if (!MCE)
1076 return false;
1078 }
1079
  // Check an N-bit, word-scaled branch-target immediate.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    // Non-constant expressions (labels) are resolved later, so accept them.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch targets must be 4-byte aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Signed N-bit range, scaled by the instruction granule (4 bytes).
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1093
1094 bool
1095 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1096 if (!isImm())
1097 return false;
1098
1099 AArch64MCExpr::VariantKind ELFRefKind;
1100 MCSymbolRefExpr::VariantKind DarwinRefKind;
1101 int64_t Addend;
1102 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1103 DarwinRefKind, Addend)) {
1104 return false;
1105 }
1106 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1107 return false;
1108
1109 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1110 }
1111
1112 bool isMovWSymbolG3() const {
1114 }
1115
1116 bool isMovWSymbolG2() const {
1117 return isMovWSymbol(
1122 }
1123
1124 bool isMovWSymbolG1() const {
1125 return isMovWSymbol(
1131 }
1132
1133 bool isMovWSymbolG0() const {
1134 return isMovWSymbol(
1140 }
1141
1142 template<int RegWidth, int Shift>
1143 bool isMOVZMovAlias() const {
1144 if (!isImm()) return false;
1145
1146 const MCExpr *E = getImm();
1147 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1148 uint64_t Value = CE->getValue();
1149
1150 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1151 }
1152 // Only supports the case of Shift being 0 if an expression is used as an
1153 // operand
1154 return !Shift && E;
1155 }
1156
1157 template<int RegWidth, int Shift>
1158 bool isMOVNMovAlias() const {
1159 if (!isImm()) return false;
1160
1161 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1162 if (!CE) return false;
1163 uint64_t Value = CE->getValue();
1164
1165 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1166 }
1167
  // FP immediate that is encodable in the 8-bit FMOV immediate field
  // (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Plain barrier operand, i.e. one without the nXS modifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS modifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1180
  // System register readable via MRS; -1U is the "no encoding" sentinel.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR; -1U is the "no encoding" sentinel.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE field whose immediate is restricted to 0-1.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // PSTATE field whose immediate is restricted to 0-15.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // SME SVCR operand with a valid PSTATE field encoding.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1208
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // General-purpose or FP scalar register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the "lo" halves of the FPR64/FPR128
  // register files (used by instructions with limited register fields).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  // NEON vector register restricted to V0-V7.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1239
1240 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1241 RegKind RK;
1242 switch (Class) {
1243 case AArch64::PPRRegClassID:
1244 case AArch64::PPR_3bRegClassID:
1245 case AArch64::PPR_p8to15RegClassID:
1246 case AArch64::PNRRegClassID:
1247 case AArch64::PNR_p8to15RegClassID:
1248 case AArch64::PPRorPNRRegClassID:
1249 RK = RegKind::SVEPredicateAsCounter;
1250 break;
1251 default:
1252 llvm_unreachable("Unsupport register class");
1253 }
1254
1255 return (Kind == k_Register && Reg.Kind == RK) &&
1256 AArch64MCRegisterClasses[Class].contains(getReg());
1257 }
1258
  // SVE register membership test: maps the requested register class onto the
  // operand kind it should have parsed as (Z data vector vs P/PN predicate),
  // then checks class containment.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1282
  // Scalar FP register used where an SVE Z register is expected.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1287
1288 template <int ElementWidth, unsigned Class>
1289 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1290 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1291 return DiagnosticPredicateTy::NoMatch;
1292
1293 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1294 return DiagnosticPredicateTy::Match;
1295
1296 return DiagnosticPredicateTy::NearMatch;
1297 }
1298
  // Accepts either predicate flavour (P vector or PN counter) of the given
  // class and element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicateTy::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1312
  // SVE predicate-as-counter (PN) register with a specific element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1323
  // SVE data (Z) vector register with a specific element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1334
  // SVE data vector register carrying a shift/extend suffix (used in
  // scaled-offset addressing); checks both the register and that the suffix
  // matches the expected type and amount.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1357
  // 64-bit GPR written where a 32-bit register is encoded (and vice versa
  // below); the add*Operands counterparts remap the register number.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Consecutive 8-register GPR64 tuple (e.g. for SYSP/RCW instructions).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Even/odd W register pair.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Even/odd X register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // The XZR form of the SYSP register-pair operand.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1389
1390 template<int64_t Angle, int64_t Remainder>
1391 DiagnosticPredicate isComplexRotation() const {
1392 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1393
1394 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1395 if (!CE) return DiagnosticPredicateTy::NoMatch;
1396 uint64_t Value = CE->getValue();
1397
1398 if (Value % Angle == Remainder && Value <= 270)
1399 return DiagnosticPredicateTy::Match;
1400 return DiagnosticPredicateTy::NearMatch;
1401 }
1402
  // Scalar 64-bit GPR belonging to the given register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR with an explicit "LSL #log2(ExtWidth/8)" suffix.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1418
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    // NumElements == 0 marks a list written without an element-type suffix.
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1427
1428 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1429 unsigned ElementWidth, unsigned Stride = 1>
1430 bool isTypedVectorList() const {
1431 if (Kind != k_VectorList)
1432 return false;
1433 if (VectorList.Count != NumRegs)
1434 return false;
1435 if (VectorList.RegisterKind != VectorKind)
1436 return false;
1437 if (VectorList.ElementWidth != ElementWidth)
1438 return false;
1439 if (VectorList.Stride != Stride)
1440 return false;
1441 return VectorList.NumElements == NumElements;
1442 }
1443
  // Typed vector list whose first register is aligned to a multiple of the
  // list length (required by SME multi-vector instructions).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1455
  // Strided vector list: the start register must lie in the first `Stride`
  // registers of either half of the Z file (Z0.. or Z16..).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1469
1470 template <int Min, int Max>
1471 DiagnosticPredicate isVectorIndex() const {
1472 if (Kind != k_VectorIndex)
1473 return DiagnosticPredicateTy::NoMatch;
1474 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1475 return DiagnosticPredicateTy::Match;
1476 return DiagnosticPredicateTy::NearMatch;
1477 }
1478
  // Simple operand-kind predicates.
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // Any register-shift operand: LSL, LSR, ASR, ROR, or MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1498
  // FP immediate that bit-exactly equals the value named by ImmEnum in the
  // ExactFPImm table (e.g. 0.5, 1.0, 2.0 for FCADD/FCMLA-style operands).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1521
  // Matches either of two exact FP immediates; the assignments inside the
  // conditions are intentional, keeping the last (NearMatch/NoMatch) result
  // for diagnostics when neither matches.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1531
  // Register-extend operand (UXTB..SXTX, or LSL as an alias for UXTX) with
  // a left-shift amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1544
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend of a 64-bit source register: UXTX/SXTX or the LSL alias.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1563
1564 bool isLSLImm3Shift() const {
1565 if (!isShiftExtend())
1566 return false;
1567 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1568 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1569 }
1570
  // Memory-operand extend of a 64-bit index register: LSL/SXTX with a shift
  // of either 0 or log2(access width in bytes).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Memory-operand extend of a 32-bit index register: UXTW/SXTW with a shift
  // of either 0 or log2(access width in bytes).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1588
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1611
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1623
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1635
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1655
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter (MSL) is a left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1665
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1675
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    // Constants must be page-aligned and within the signed 21-bit
    // page-scaled ADRP range.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
1691
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    // Constants must fit the signed 21-bit byte-offset ADR range.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1707
  // SME matrix (ZA) operand of the expected kind, register class, and
  // element size.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1718
  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    // Non-constant expressions (labels) are resolved later; accept them.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Must be 4-byte aligned.
    if (Val & 0b11)
      return false;
    // Non-positive and within the signed 18-bit (word-scaled 16-bit) range.
    return (Val <= 0) && (Val > -(1 << 18));
  }
1733
1734 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1735 // Add as immediates when possible. Null MCExpr = 0.
1736 if (!Expr)
1738 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1739 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1740 else
1742 }
1743
1744 void addRegOperands(MCInst &Inst, unsigned N) const {
1745 assert(N == 1 && "Invalid number of operands!");
1747 }
1748
  // Append the matrix (ZA) register to the instruction.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1753
1754 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1755 assert(N == 1 && "Invalid number of operands!");
1756 assert(
1757 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1758
1759 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1760 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1761 RI->getEncodingValue(getReg()));
1762
1764 }
1765
1766 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!");
1768 assert(
1769 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1770
1771 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1772 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1773 RI->getEncodingValue(getReg()));
1774
1776 }
1777
1778 template <int Width>
1779 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1780 unsigned Base;
1781 switch (Width) {
1782 case 8: Base = AArch64::B0; break;
1783 case 16: Base = AArch64::H0; break;
1784 case 32: Base = AArch64::S0; break;
1785 case 64: Base = AArch64::D0; break;
1786 case 128: Base = AArch64::Q0; break;
1787 default:
1788 llvm_unreachable("Unsupported width");
1789 }
1790 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1791 }
1792
1793 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1794 assert(N == 1 && "Invalid number of operands!");
1795 unsigned Reg = getReg();
1796 // Normalise to PPR
1797 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1798 Reg = Reg - AArch64::PN0 + AArch64::P0;
1800 }
1801
  // Remap a predicate-as-counter (PN) register to its plain P counterpart.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }
1807
  // The operand was parsed as a Q register; emit the overlapping D register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1814
1815 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1816 assert(N == 1 && "Invalid number of operands!");
1817 assert(
1818 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1820 }
1821
1822 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1823 assert(N == 1 && "Invalid number of operands!");
1825 }
1826
1827 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1828 assert(N == 1 && "Invalid number of operands!");
1830 }
1831
  // Row selector into the FirstRegs table used by addVectorListOperands.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1838
  // Emit a vector list as the pseudo register for a NumRegs-long tuple.
  // FirstRegs[RegTy][0] holds the register the parser numbers lists from;
  // FirstRegs[RegTy][NumRegs] is the first tuple register of that length.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1866
1867 template <unsigned NumRegs>
1868 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1869 assert(N == 1 && "Invalid number of operands!");
1870 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1871
1872 switch (NumRegs) {
1873 case 2:
1874 if (getVectorListStart() < AArch64::Z16) {
1875 assert((getVectorListStart() < AArch64::Z8) &&
1876 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1878 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1879 } else {
1880 assert((getVectorListStart() < AArch64::Z24) &&
1881 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1883 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1884 }
1885 break;
1886 case 4:
1887 if (getVectorListStart() < AArch64::Z16) {
1888 assert((getVectorListStart() < AArch64::Z4) &&
1889 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1891 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1892 } else {
1893 assert((getVectorListStart() < AArch64::Z20) &&
1894 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1896 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1897 }
1898 break;
1899 default:
1900 llvm_unreachable("Unsupported number of registers for strided vec list");
1901 }
1902 }
1903
  // Emit a ZA tile list as an 8-bit register mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  // Emit a vector element index as an immediate.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1915
  // Encode a two-valued exact FP immediate as 0 or 1, depending on which of
  // the two table entries the operand matches.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1922
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1930
1931 template <int Shift>
1932 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1933 assert(N == 2 && "Invalid number of operands!");
1934 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1935 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1936 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1937 } else if (isShiftedImm()) {
1938 addExpr(Inst, getShiftedImmVal());
1939 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1940 } else {
1941 addExpr(Inst, getImm());
1943 }
1944 }
1945
  // Emit the negated value plus its shift for instructions that encode a
  // negative immediate as the positive aliased form (e.g. SUB for ADD).
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1955
1956 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!");
1959 }
1960
  // ADRP immediates are page-scaled: constants are shifted down by 12 bits;
  // label expressions are emitted as-is for the fixup to resolve.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  // ADR immediates are byte offsets; no scaling needed.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1973
  // Unsigned 12-bit scaled offset: constants are divided by the access size;
  // expressions are emitted unscaled for the fixup to resolve.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1985
1986 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1990 }
1991
  // Emit the immediate divided by Scale; the operand must already be a
  // constant (cast<> asserts otherwise).
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // Same scaling, but for range-style immediates (first value of a range).
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
  }
2004
  // Encode a logical (bitmask) immediate of T's width into its N:immr:imms
  // field encoding.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // As above, but encodes the bitwise complement (for BIC-style aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
2022
  // Advanced-SIMD modified-immediate (type 10) operand.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // NOTE(review): the line computing `encoding` from MCE was lost in
    // extraction — presumably an AArch64_AM::encodeAdvSIMDModImm* call.
    Inst.addOperand(MCOperand::createImm(encoding));
  }
2029
2030 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2031 // Branch operands don't encode the low bits, so shift them off
2032 // here. If it's a label, however, just put it on directly as there's
2033 // not enough information now to do anything.
2034 assert(N == 1 && "Invalid number of operands!");
2035 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2036 if (!MCE) {
2037 addExpr(Inst, getImm());
2038 return;
2039 }
2040 assert(MCE && "Invalid constant immediate operand!");
2041 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2042 }
2043
2044 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2045 // PC-relative operands don't encode the low bits, so shift them off
2046 // here. If it's a label, however, just put it on directly as there's
2047 // not enough information now to do anything.
2048 assert(N == 1 && "Invalid number of operands!");
2049 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2050 if (!MCE) {
2051 addExpr(Inst, getImm());
2052 return;
2053 }
2054 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2055 }
2056
2057 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2058 // Branch operands don't encode the low bits, so shift them off
2059 // here. If it's a label, however, just put it on directly as there's
2060 // not enough information now to do anything.
2061 assert(N == 1 && "Invalid number of operands!");
2062 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2063 if (!MCE) {
2064 addExpr(Inst, getImm());
2065 return;
2066 }
2067 assert(MCE && "Invalid constant immediate operand!");
2068 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2069 }
2070
2071 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2072 // Branch operands don't encode the low bits, so shift them off
2073 // here. If it's a label, however, just put it on directly as there's
2074 // not enough information now to do anything.
2075 assert(N == 1 && "Invalid number of operands!");
2076 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2077 if (!MCE) {
2078 addExpr(Inst, getImm());
2079 return;
2080 }
2081 assert(MCE && "Invalid constant immediate operand!");
2082 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2083 }
2084
  // Floating-point immediate, encoded via its 8-bit FP64 representation.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // NOTE(review): the opening "Inst.addOperand(MCOperand::createImm("
    // of the following call was lost in extraction.
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
2090
  // Barrier option operand, added as its immediate value.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2095
  // Barrier operand with the nXS modifier; same immediate encoding.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2100
  // System register for MRS (read), added as its MRS encoding.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
2106
  // System register for MSR (write), added as its MSR encoding.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
2112
  // PState field (0/1-immediate form), added as its field encoding.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2118
  // SVCR (SME streaming/ZA control) operand, added as its PState field.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
2124
  // PState field (0..15-immediate form), added as its field encoding.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2130
  // System-instruction CR operand (cN), added as its number.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
2135
  // Prefetch-operation operand, added as its immediate value.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
2140
  // PSB hint operand, added as its immediate value.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
2145
  // BTI hint operand, added as its immediate value.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
2150
  // Shifter operand: pack the shift type and amount into one immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
          AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    // NOTE(review): the "Inst.addOperand(MCOperand::createImm(Imm));" line
    // was lost in extraction.
  }
2157
  // LSL-only 3-bit shifter: the raw amount is the immediate.
  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();
    // NOTE(review): the "Inst.addOperand(MCOperand::createImm(Imm));" line
    // was lost in extraction.
  }
2163
  // SYSP operand that must be the XZR pair: verify the parsed register and
  // add XZR.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (!isScalarReg())
      return;

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
    // NOTE(review): the continuation of the expression above (the register
    // lookup chained onto getRegClass) was lost in extraction.
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2178
  // Arithmetic extend operand (32-bit form); a bare LSL is treated as UXTW.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    // NOTE(review): the "Inst.addOperand(MCOperand::createImm(Imm));" line
    // was lost in extraction.
  }
2186
  // Arithmetic extend operand (64-bit form); a bare LSL is treated as UXTX.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    // NOTE(review): the "Inst.addOperand(MCOperand::createImm(Imm));" line
    // was lost in extraction.
  }
2194
2195 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 2 && "Invalid number of operands!");
2197 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2198 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2199 Inst.addOperand(MCOperand::createImm(IsSigned));
2200 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2201 }
2202
2203 // For 8-bit load/store instructions with a register offset, both the
2204 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2205 // they're disambiguated by whether the shift was explicit or implicit rather
2206 // than its size.
2207 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2208 assert(N == 2 && "Invalid number of operands!");
2209 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2210 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2211 Inst.addOperand(MCOperand::createImm(IsSigned));
2212 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2213 }
2214
2215 template<int Shift>
2216 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2217 assert(N == 1 && "Invalid number of operands!");
2218
2219 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2220 if (CE) {
2221 uint64_t Value = CE->getValue();
2222 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2223 } else {
2224 addExpr(Inst, getImm());
2225 }
2226 }
2227
2228 template<int Shift>
2229 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 1 && "Invalid number of operands!");
2231
2232 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2233 uint64_t Value = CE->getValue();
2234 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2235 }
2236
2237 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2238 assert(N == 1 && "Invalid number of operands!");
2239 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2240 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2241 }
2242
2243 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2244 assert(N == 1 && "Invalid number of operands!");
2245 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2246 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2247 }
2248
2249 void print(raw_ostream &OS) const override;
2250
2251 static std::unique_ptr<AArch64Operand>
2252 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2253 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2254 Op->Tok.Data = Str.data();
2255 Op->Tok.Length = Str.size();
2256 Op->Tok.IsSuffix = IsSuffix;
2257 Op->StartLoc = S;
2258 Op->EndLoc = S;
2259 return Op;
2260 }
2261
  // Build a register operand of the given kind, optionally carrying an
  // equality constraint and a shift/extend modifier.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            // NOTE(review): the defaulted ExtTy parameter line was lost in
            // extraction.
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    // ElementWidth only applies to vector kinds; CreateVectorReg fills it.
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2280
  // Build a vector register operand (NEON or SVE) with an element width.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  // NOTE(review): the defaulted ExtTy parameter line was
                  // lost in extraction.
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2296
2297 static std::unique_ptr<AArch64Operand>
2298 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2299 unsigned NumElements, unsigned ElementWidth,
2300 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2301 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2302 Op->VectorList.RegNum = RegNum;
2303 Op->VectorList.Count = Count;
2304 Op->VectorList.Stride = Stride;
2305 Op->VectorList.NumElements = NumElements;
2306 Op->VectorList.ElementWidth = ElementWidth;
2307 Op->VectorList.RegisterKind = RegisterKind;
2308 Op->StartLoc = S;
2309 Op->EndLoc = E;
2310 return Op;
2311 }
2312
2313 static std::unique_ptr<AArch64Operand>
2314 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2315 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2316 Op->VectorIndex.Val = Idx;
2317 Op->StartLoc = S;
2318 Op->EndLoc = E;
2319 return Op;
2320 }
2321
2322 static std::unique_ptr<AArch64Operand>
2323 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2324 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2325 Op->MatrixTileList.RegMask = RegMask;
2326 Op->StartLoc = S;
2327 Op->EndLoc = E;
2328 return Op;
2329 }
2330
2331 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2332 const unsigned ElementWidth) {
2333 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2334 RegMap = {
2335 {{0, AArch64::ZAB0},
2336 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2337 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2338 {{8, AArch64::ZAB0},
2339 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2340 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2341 {{16, AArch64::ZAH0},
2342 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2343 {{16, AArch64::ZAH1},
2344 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2345 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2346 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2347 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2348 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2349 };
2350
2351 if (ElementWidth == 64)
2352 OutRegs.insert(Reg);
2353 else {
2354 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2355 assert(!Regs.empty() && "Invalid tile or element width!");
2356 for (auto OutReg : Regs)
2357 OutRegs.insert(OutReg);
2358 }
2359 }
2360
2361 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2362 SMLoc E, MCContext &Ctx) {
2363 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2364 Op->Imm.Val = Val;
2365 Op->StartLoc = S;
2366 Op->EndLoc = E;
2367 return Op;
2368 }
2369
2370 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2371 unsigned ShiftAmount,
2372 SMLoc S, SMLoc E,
2373 MCContext &Ctx) {
2374 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2375 Op->ShiftedImm .Val = Val;
2376 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2377 Op->StartLoc = S;
2378 Op->EndLoc = E;
2379 return Op;
2380 }
2381
2382 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2383 unsigned Last, SMLoc S,
2384 SMLoc E,
2385 MCContext &Ctx) {
2386 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2387 Op->ImmRange.First = First;
2388 Op->ImmRange.Last = Last;
2389 Op->EndLoc = E;
2390 return Op;
2391 }
2392
2393 static std::unique_ptr<AArch64Operand>
2394 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2395 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2396 Op->CondCode.Code = Code;
2397 Op->StartLoc = S;
2398 Op->EndLoc = E;
2399 return Op;
2400 }
2401
2402 static std::unique_ptr<AArch64Operand>
2403 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2404 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2405 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2406 Op->FPImm.IsExact = IsExact;
2407 Op->StartLoc = S;
2408 Op->EndLoc = S;
2409 return Op;
2410 }
2411
2412 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2413 StringRef Str,
2414 SMLoc S,
2415 MCContext &Ctx,
2416 bool HasnXSModifier) {
2417 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2418 Op->Barrier.Val = Val;
2419 Op->Barrier.Data = Str.data();
2420 Op->Barrier.Length = Str.size();
2421 Op->Barrier.HasnXSModifier = HasnXSModifier;
2422 Op->StartLoc = S;
2423 Op->EndLoc = S;
2424 return Op;
2425 }
2426
2427 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2428 uint32_t MRSReg,
2429 uint32_t MSRReg,
2430 uint32_t PStateField,
2431 MCContext &Ctx) {
2432 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2433 Op->SysReg.Data = Str.data();
2434 Op->SysReg.Length = Str.size();
2435 Op->SysReg.MRSReg = MRSReg;
2436 Op->SysReg.MSRReg = MSRReg;
2437 Op->SysReg.PStateField = PStateField;
2438 Op->StartLoc = S;
2439 Op->EndLoc = S;
2440 return Op;
2441 }
2442
2443 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2444 SMLoc E, MCContext &Ctx) {
2445 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2446 Op->SysCRImm.Val = Val;
2447 Op->StartLoc = S;
2448 Op->EndLoc = E;
2449 return Op;
2450 }
2451
2452 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2453 StringRef Str,
2454 SMLoc S,
2455 MCContext &Ctx) {
2456 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2457 Op->Prefetch.Val = Val;
2458 Op->Barrier.Data = Str.data();
2459 Op->Barrier.Length = Str.size();
2460 Op->StartLoc = S;
2461 Op->EndLoc = S;
2462 return Op;
2463 }
2464
2465 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2466 StringRef Str,
2467 SMLoc S,
2468 MCContext &Ctx) {
2469 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2470 Op->PSBHint.Val = Val;
2471 Op->PSBHint.Data = Str.data();
2472 Op->PSBHint.Length = Str.size();
2473 Op->StartLoc = S;
2474 Op->EndLoc = S;
2475 return Op;
2476 }
2477
2478 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2479 StringRef Str,
2480 SMLoc S,
2481 MCContext &Ctx) {
2482 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2483 Op->BTIHint.Val = Val | 32;
2484 Op->BTIHint.Data = Str.data();
2485 Op->BTIHint.Length = Str.size();
2486 Op->StartLoc = S;
2487 Op->EndLoc = S;
2488 return Op;
2489 }
2490
2491 static std::unique_ptr<AArch64Operand>
2492 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2493 SMLoc S, SMLoc E, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2495 Op->MatrixReg.RegNum = RegNum;
2496 Op->MatrixReg.ElementWidth = ElementWidth;
2497 Op->MatrixReg.Kind = Kind;
2498 Op->StartLoc = S;
2499 Op->EndLoc = E;
2500 return Op;
2501 }
2502
2503 static std::unique_ptr<AArch64Operand>
2504 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2505 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2506 Op->SVCR.PStateField = PStateField;
2507 Op->SVCR.Data = Str.data();
2508 Op->SVCR.Length = Str.size();
2509 Op->StartLoc = S;
2510 Op->EndLoc = S;
2511 return Op;
2512 }
2513
2514 static std::unique_ptr<AArch64Operand>
2515 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2516 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2517 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2518 Op->ShiftExtend.Type = ShOp;
2519 Op->ShiftExtend.Amount = Val;
2520 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2521 Op->StartLoc = S;
2522 Op->EndLoc = E;
2523 return Op;
2524 }
2525};
2526
2527} // end anonymous namespace.
2528
// Debug-print this operand in a kind-specific, mostly angle-bracketed form.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    // Prefer the mnemonic name; fall back to the raw value.
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    // Prefer the mnemonic name; fall back to the raw value.
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Emit the 8-bit tile mask MSB-first as a string of 0/1 digits.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    // A register with an attached shift/extend also prints it, sharing the
    // k_ShiftExtend case below.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2628
2629/// @name Auto-generated Match Functions
2630/// {
2631
2633
2634/// }
2635
2637 return StringSwitch<unsigned>(Name.lower())
2638 .Case("v0", AArch64::Q0)
2639 .Case("v1", AArch64::Q1)
2640 .Case("v2", AArch64::Q2)
2641 .Case("v3", AArch64::Q3)
2642 .Case("v4", AArch64::Q4)
2643 .Case("v5", AArch64::Q5)
2644 .Case("v6", AArch64::Q6)
2645 .Case("v7", AArch64::Q7)
2646 .Case("v8", AArch64::Q8)
2647 .Case("v9", AArch64::Q9)
2648 .Case("v10", AArch64::Q10)
2649 .Case("v11", AArch64::Q11)
2650 .Case("v12", AArch64::Q12)
2651 .Case("v13", AArch64::Q13)
2652 .Case("v14", AArch64::Q14)
2653 .Case("v15", AArch64::Q15)
2654 .Case("v16", AArch64::Q16)
2655 .Case("v17", AArch64::Q17)
2656 .Case("v18", AArch64::Q18)
2657 .Case("v19", AArch64::Q19)
2658 .Case("v20", AArch64::Q20)
2659 .Case("v21", AArch64::Q21)
2660 .Case("v22", AArch64::Q22)
2661 .Case("v23", AArch64::Q23)
2662 .Case("v24", AArch64::Q24)
2663 .Case("v25", AArch64::Q25)
2664 .Case("v26", AArch64::Q26)
2665 .Case("v27", AArch64::Q27)
2666 .Case("v28", AArch64::Q28)
2667 .Case("v29", AArch64::Q29)
2668 .Case("v30", AArch64::Q30)
2669 .Case("v31", AArch64::Q31)
2670 .Default(0);
2671}
2672
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the sentinel for "not a valid suffix".
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    // NOTE(review): the "Res = StringSwitch<...>(Suffix.lower())" header of
    // this case chain was lost in extraction.
        .Case("", {0, 0})
        .Case(".1d", {1, 64})
        .Case(".1q", {1, 128})
        // '.2h' needed for fp16 scalar pairwise reductions
        .Case(".2h", {2, 16})
        .Case(".2b", {2, 8})
        .Case(".2s", {2, 32})
        .Case(".2d", {2, 64})
        // '.4b' is another special case for the ARMv8.2a dot product
        // operand
        .Case(".4b", {4, 8})
        .Case(".4h", {4, 16})
        .Case(".4s", {4, 32})
        .Case(".8b", {8, 8})
        .Case(".8h", {8, 16})
        .Case(".16b", {16, 8})
        // Accept the width neutral ones, too, for verbose syntax. If
        // those aren't used in the right places, the token operand won't
        // match so all will work out.
        .Case(".b", {0, 8})
        .Case(".h", {0, 16})
        .Case(".s", {0, 32})
        .Case(".d", {0, 64})
        .Default({-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // NOTE(review): the "Res = StringSwitch<...>(Suffix.lower())" header of
    // this case chain was lost in extraction.
        .Case("", {0, 0})
        .Case(".b", {0, 8})
        .Case(".h", {0, 16})
        .Case(".s", {0, 32})
        .Case(".d", {0, 64})
        .Case(".q", {0, 128})
        .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2731
// True iff Suffix is a recognized vector suffix for the given register kind.
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
  return parseVectorKind(Suffix, VectorKind).has_value();
}
2735
2737 return StringSwitch<unsigned>(Name.lower())
2738 .Case("z0", AArch64::Z0)
2739 .Case("z1", AArch64::Z1)
2740 .Case("z2", AArch64::Z2)
2741 .Case("z3", AArch64::Z3)
2742 .Case("z4", AArch64::Z4)
2743 .Case("z5", AArch64::Z5)
2744 .Case("z6", AArch64::Z6)
2745 .Case("z7", AArch64::Z7)
2746 .Case("z8", AArch64::Z8)
2747 .Case("z9", AArch64::Z9)
2748 .Case("z10", AArch64::Z10)
2749 .Case("z11", AArch64::Z11)
2750 .Case("z12", AArch64::Z12)
2751 .Case("z13", AArch64::Z13)
2752 .Case("z14", AArch64::Z14)
2753 .Case("z15", AArch64::Z15)
2754 .Case("z16", AArch64::Z16)
2755 .Case("z17", AArch64::Z17)
2756 .Case("z18", AArch64::Z18)
2757 .Case("z19", AArch64::Z19)
2758 .Case("z20", AArch64::Z20)
2759 .Case("z21", AArch64::Z21)
2760 .Case("z22", AArch64::Z22)
2761 .Case("z23", AArch64::Z23)
2762 .Case("z24", AArch64::Z24)
2763 .Case("z25", AArch64::Z25)
2764 .Case("z26", AArch64::Z26)
2765 .Case("z27", AArch64::Z27)
2766 .Case("z28", AArch64::Z28)
2767 .Case("z29", AArch64::Z29)
2768 .Case("z30", AArch64::Z30)
2769 .Case("z31", AArch64::Z31)
2770 .Default(0);
2771}
2772
2774 return StringSwitch<unsigned>(Name.lower())
2775 .Case("p0", AArch64::P0)
2776 .Case("p1", AArch64::P1)
2777 .Case("p2", AArch64::P2)
2778 .Case("p3", AArch64::P3)
2779 .Case("p4", AArch64::P4)
2780 .Case("p5", AArch64::P5)
2781 .Case("p6", AArch64::P6)
2782 .Case("p7", AArch64::P7)
2783 .Case("p8", AArch64::P8)
2784 .Case("p9", AArch64::P9)
2785 .Case("p10", AArch64::P10)
2786 .Case("p11", AArch64::P11)
2787 .Case("p12", AArch64::P12)
2788 .Case("p13", AArch64::P13)
2789 .Case("p14", AArch64::P14)
2790 .Case("p15", AArch64::P15)
2791 .Default(0);
2792}
2793
2795 return StringSwitch<unsigned>(Name.lower())
2796 .Case("pn0", AArch64::PN0)
2797 .Case("pn1", AArch64::PN1)
2798 .Case("pn2", AArch64::PN2)
2799 .Case("pn3", AArch64::PN3)
2800 .Case("pn4", AArch64::PN4)
2801 .Case("pn5", AArch64::PN5)
2802 .Case("pn6", AArch64::PN6)
2803 .Case("pn7", AArch64::PN7)
2804 .Case("pn8", AArch64::PN8)
2805 .Case("pn9", AArch64::PN9)
2806 .Case("pn10", AArch64::PN10)
2807 .Case("pn11", AArch64::PN11)
2808 .Case("pn12", AArch64::PN12)
2809 .Case("pn13", AArch64::PN13)
2810 .Case("pn14", AArch64::PN14)
2811 .Case("pn15", AArch64::PN15)
2812 .Default(0);
2813}
2814
2816 return StringSwitch<unsigned>(Name.lower())
2817 .Case("za0.d", AArch64::ZAD0)
2818 .Case("za1.d", AArch64::ZAD1)
2819 .Case("za2.d", AArch64::ZAD2)
2820 .Case("za3.d", AArch64::ZAD3)
2821 .Case("za4.d", AArch64::ZAD4)
2822 .Case("za5.d", AArch64::ZAD5)
2823 .Case("za6.d", AArch64::ZAD6)
2824 .Case("za7.d", AArch64::ZAD7)
2825 .Case("za0.s", AArch64::ZAS0)
2826 .Case("za1.s", AArch64::ZAS1)
2827 .Case("za2.s", AArch64::ZAS2)
2828 .Case("za3.s", AArch64::ZAS3)
2829 .Case("za0.h", AArch64::ZAH0)
2830 .Case("za1.h", AArch64::ZAH1)
2831 .Case("za0.b", AArch64::ZAB0)
2832 .Default(0);
2833}
2834
2836 return StringSwitch<unsigned>(Name.lower())
2837 .Case("za", AArch64::ZA)
2838 .Case("za0.q", AArch64::ZAQ0)
2839 .Case("za1.q", AArch64::ZAQ1)
2840 .Case("za2.q", AArch64::ZAQ2)
2841 .Case("za3.q", AArch64::ZAQ3)
2842 .Case("za4.q", AArch64::ZAQ4)
2843 .Case("za5.q", AArch64::ZAQ5)
2844 .Case("za6.q", AArch64::ZAQ6)
2845 .Case("za7.q", AArch64::ZAQ7)
2846 .Case("za8.q", AArch64::ZAQ8)
2847 .Case("za9.q", AArch64::ZAQ9)
2848 .Case("za10.q", AArch64::ZAQ10)
2849 .Case("za11.q", AArch64::ZAQ11)
2850 .Case("za12.q", AArch64::ZAQ12)
2851 .Case("za13.q", AArch64::ZAQ13)
2852 .Case("za14.q", AArch64::ZAQ14)
2853 .Case("za15.q", AArch64::ZAQ15)
2854 .Case("za0.d", AArch64::ZAD0)
2855 .Case("za1.d", AArch64::ZAD1)
2856 .Case("za2.d", AArch64::ZAD2)
2857 .Case("za3.d", AArch64::ZAD3)
2858 .Case("za4.d", AArch64::ZAD4)
2859 .Case("za5.d", AArch64::ZAD5)
2860 .Case("za6.d", AArch64::ZAD6)
2861 .Case("za7.d", AArch64::ZAD7)
2862 .Case("za0.s", AArch64::ZAS0)
2863 .Case("za1.s", AArch64::ZAS1)
2864 .Case("za2.s", AArch64::ZAS2)
2865 .Case("za3.s", AArch64::ZAS3)
2866 .Case("za0.h", AArch64::ZAH0)
2867 .Case("za1.h", AArch64::ZAH1)
2868 .Case("za0.b", AArch64::ZAB0)
2869 .Case("za0h.q", AArch64::ZAQ0)
2870 .Case("za1h.q", AArch64::ZAQ1)
2871 .Case("za2h.q", AArch64::ZAQ2)
2872 .Case("za3h.q", AArch64::ZAQ3)
2873 .Case("za4h.q", AArch64::ZAQ4)
2874 .Case("za5h.q", AArch64::ZAQ5)
2875 .Case("za6h.q", AArch64::ZAQ6)
2876 .Case("za7h.q", AArch64::ZAQ7)
2877 .Case("za8h.q", AArch64::ZAQ8)
2878 .Case("za9h.q", AArch64::ZAQ9)
2879 .Case("za10h.q", AArch64::ZAQ10)
2880 .Case("za11h.q", AArch64::ZAQ11)
2881 .Case("za12h.q", AArch64::ZAQ12)
2882 .Case("za13h.q", AArch64::ZAQ13)
2883 .Case("za14h.q", AArch64::ZAQ14)
2884 .Case("za15h.q", AArch64::ZAQ15)
2885 .Case("za0h.d", AArch64::ZAD0)
2886 .Case("za1h.d", AArch64::ZAD1)
2887 .Case("za2h.d", AArch64::ZAD2)
2888 .Case("za3h.d", AArch64::ZAD3)
2889 .Case("za4h.d", AArch64::ZAD4)
2890 .Case("za5h.d", AArch64::ZAD5)
2891 .Case("za6h.d", AArch64::ZAD6)
2892 .Case("za7h.d", AArch64::ZAD7)
2893 .Case("za0h.s", AArch64::ZAS0)
2894 .Case("za1h.s", AArch64::ZAS1)
2895 .Case("za2h.s", AArch64::ZAS2)
2896 .Case("za3h.s", AArch64::ZAS3)
2897 .Case("za0h.h", AArch64::ZAH0)
2898 .Case("za1h.h", AArch64::ZAH1)
2899 .Case("za0h.b", AArch64::ZAB0)
2900 .Case("za0v.q", AArch64::ZAQ0)
2901 .Case("za1v.q", AArch64::ZAQ1)
2902 .Case("za2v.q", AArch64::ZAQ2)
2903 .Case("za3v.q", AArch64::ZAQ3)
2904 .Case("za4v.q", AArch64::ZAQ4)
2905 .Case("za5v.q", AArch64::ZAQ5)
2906 .Case("za6v.q", AArch64::ZAQ6)
2907 .Case("za7v.q", AArch64::ZAQ7)
2908 .Case("za8v.q", AArch64::ZAQ8)
2909 .Case("za9v.q", AArch64::ZAQ9)
2910 .Case("za10v.q", AArch64::ZAQ10)
2911 .Case("za11v.q", AArch64::ZAQ11)
2912 .Case("za12v.q", AArch64::ZAQ12)
2913 .Case("za13v.q", AArch64::ZAQ13)
2914 .Case("za14v.q", AArch64::ZAQ14)
2915 .Case("za15v.q", AArch64::ZAQ15)
2916 .Case("za0v.d", AArch64::ZAD0)
2917 .Case("za1v.d", AArch64::ZAD1)
2918 .Case("za2v.d", AArch64::ZAD2)
2919 .Case("za3v.d", AArch64::ZAD3)
2920 .Case("za4v.d", AArch64::ZAD4)
2921 .Case("za5v.d", AArch64::ZAD5)
2922 .Case("za6v.d", AArch64::ZAD6)
2923 .Case("za7v.d", AArch64::ZAD7)
2924 .Case("za0v.s", AArch64::ZAS0)
2925 .Case("za1v.s", AArch64::ZAS1)
2926 .Case("za2v.s", AArch64::ZAS2)
2927 .Case("za3v.s", AArch64::ZAS3)
2928 .Case("za0v.h", AArch64::ZAH0)
2929 .Case("za1v.h", AArch64::ZAH1)
2930 .Case("za0v.b", AArch64::ZAB0)
2931 .Default(0);
2932}
2933
// Required MCTargetAsmParser hook; returns true on failure, per convention.
bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
}
2938
2939ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2940 SMLoc &EndLoc) {
2941 StartLoc = getLoc();
2942 ParseStatus Res = tryParseScalarRegister(Reg);
2943 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2944 return Res;
2945}
2946
// Matches a register name or register alias previously defined by '.req'
// Returns 0 when the name matches no register (or a register of the wrong
// kind).
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  RegKind Kind) {
  unsigned RegNum = 0;
  // Try each register file in turn; a hit in the wrong file yields 0 so a
  // name can only match its own kind.
  if ((RegNum = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;

  // NOTE(review): the predicate-as-counter match condition that guards the
  // next return was lost in extraction.
    return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;

  if ((RegNum = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? RegNum : 0;

  if ((RegNum = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? RegNum : 0;

  if (Name.equals_insensitive("zt0"))
    return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;

  // The parsed register must be of RegKind Scalar
  if ((RegNum = MatchRegisterName(Name)))
    return (Kind == RegKind::Scalar) ? RegNum : 0;

  if (!RegNum) {
    // Handle a few common aliases of registers.
    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
                    .Case("fp", AArch64::FP)
                    .Case("lr",  AArch64::LR)
                    .Case("x31", AArch64::XZR)
                    .Case("w31", AArch64::WZR)
                    .Default(0))
      return Kind == RegKind::Scalar ? RegNum : 0;

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;

    // set RegNum if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}
2996
2997unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2998 switch (K) {
2999 case RegKind::Scalar:
3000 case RegKind::NeonVector:
3001 case RegKind::SVEDataVector:
3002 return 32;
3003 case RegKind::Matrix:
3004 case RegKind::SVEPredicateVector:
3005 case RegKind::SVEPredicateAsCounter:
3006 return 16;
3007 case RegKind::LookupTable:
3008 return 1;
3009 }
3010 llvm_unreachable("Unsupported RegKind");
3011}
3012
3013/// tryParseScalarRegister - Try to parse a register name. The token must be an
3014/// Identifier when called, and if it is a register name the token is eaten and
3015/// the register is added to the operand list.
3016ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3017 const AsmToken &Tok = getTok();
3018 if (Tok.isNot(AsmToken::Identifier))
3019 return ParseStatus::NoMatch;
3020
3021 std::string lowerCase = Tok.getString().lower();
3022 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3023 if (Reg == 0)
3024 return ParseStatus::NoMatch;
3025
3026 RegNum = Reg;
3027 Lex(); // Eat identifier token.
3028 return ParseStatus::Success;
3029}
3030
3031/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3032ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3033 SMLoc S = getLoc();
3034
3035 if (getTok().isNot(AsmToken::Identifier))
3036 return Error(S, "Expected cN operand where 0 <= N <= 15");
3037
3038 StringRef Tok = getTok().getIdentifier();
3039 if (Tok[0] != 'c' && Tok[0] != 'C')
3040 return Error(S, "Expected cN operand where 0 <= N <= 15");
3041
3042 uint32_t CRNum;
3043 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3044 if (BadNum || CRNum > 15)
3045 return Error(S, "Expected cN operand where 0 <= N <= 15");
3046
3047 Lex(); // Eat identifier token.
3048 Operands.push_back(
3049 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3050 return ParseStatus::Success;
3051}
3052
3053// Either an identifier for named values or a 6-bit immediate.
3054ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3055 SMLoc S = getLoc();
3056 const AsmToken &Tok = getTok();
3057
3058 unsigned MaxVal = 63;
3059
3060 // Immediate case, with optional leading hash:
3061 if (parseOptionalToken(AsmToken::Hash) ||
3062 Tok.is(AsmToken::Integer)) {
3063 const MCExpr *ImmVal;
3064 if (getParser().parseExpression(ImmVal))
3065 return ParseStatus::Failure;
3066
3067 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3068 if (!MCE)
3069 return TokError("immediate value expected for prefetch operand");
3070 unsigned prfop = MCE->getValue();
3071 if (prfop > MaxVal)
3072 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3073 "] expected");
3074
3075 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3076 Operands.push_back(AArch64Operand::CreatePrefetch(
3077 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3078 return ParseStatus::Success;
3079 }
3080
3081 if (Tok.isNot(AsmToken::Identifier))
3082 return TokError("prefetch hint expected");
3083
3084 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3085 if (!RPRFM)
3086 return TokError("prefetch hint expected");
3087
3088 Operands.push_back(AArch64Operand::CreatePrefetch(
3089 RPRFM->Encoding, Tok.getString(), S, getContext()));
3090 Lex(); // Eat identifier token.
3091 return ParseStatus::Success;
3092}
3093
3094/// tryParsePrefetch - Try to parse a prefetch operand.
3095template <bool IsSVEPrefetch>
3096ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3097 SMLoc S = getLoc();
3098 const AsmToken &Tok = getTok();
3099
3100 auto LookupByName = [](StringRef N) {
3101 if (IsSVEPrefetch) {
3102 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3103 return std::optional<unsigned>(Res->Encoding);
3104 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3105 return std::optional<unsigned>(Res->Encoding);
3106 return std::optional<unsigned>();
3107 };
3108
3109 auto LookupByEncoding = [](unsigned E) {
3110 if (IsSVEPrefetch) {
3111 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3112 return std::optional<StringRef>(Res->Name);
3113 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3114 return std::optional<StringRef>(Res->Name);
3115 return std::optional<StringRef>();
3116 };
3117 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3118
3119 // Either an identifier for named values or a 5-bit immediate.
3120 // Eat optional hash.
3121 if (parseOptionalToken(AsmToken::Hash) ||
3122 Tok.is(AsmToken::Integer)) {
3123 const MCExpr *ImmVal;
3124 if (getParser().parseExpression(ImmVal))
3125 return ParseStatus::Failure;
3126
3127 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3128 if (!MCE)
3129 return TokError("immediate value expected for prefetch operand");
3130 unsigned prfop = MCE->getValue();
3131 if (prfop > MaxVal)
3132 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3133 "] expected");
3134
3135 auto PRFM = LookupByEncoding(MCE->getValue());
3136 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3137 S, getContext()));
3138 return ParseStatus::Success;
3139 }
3140
3141 if (Tok.isNot(AsmToken::Identifier))
3142 return TokError("prefetch hint expected");
3143
3144 auto PRFM = LookupByName(Tok.getString());
3145 if (!PRFM)
3146 return TokError("prefetch hint expected");
3147
3148 Operands.push_back(AArch64Operand::CreatePrefetch(
3149 *PRFM, Tok.getString(), S, getContext()));
3150 Lex(); // Eat identifier token.
3151 return ParseStatus::Success;
3152}
3153
3154/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3155ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3156 SMLoc S = getLoc();
3157 const AsmToken &Tok = getTok();
3158 if (Tok.isNot(AsmToken::Identifier))
3159 return TokError("invalid operand for instruction");
3160
3161 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3162 if (!PSB)
3163 return TokError("invalid operand for instruction");
3164
3165 Operands.push_back(AArch64Operand::CreatePSBHint(
3166 PSB->Encoding, Tok.getString(), S, getContext()));
3167 Lex(); // Eat identifier token.
3168 return ParseStatus::Success;
3169}
3170
3171ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3172 SMLoc StartLoc = getLoc();
3173
3174 MCRegister RegNum;
3175
3176 // The case where xzr, xzr is not present is handled by an InstAlias.
3177
3178 auto RegTok = getTok(); // in case we need to backtrack
3179 if (!tryParseScalarRegister(RegNum).isSuccess())
3180 return ParseStatus::NoMatch;
3181
3182 if (RegNum != AArch64::XZR) {
3183 getLexer().UnLex(RegTok);
3184 return ParseStatus::NoMatch;
3185 }
3186
3187 if (parseComma())
3188 return ParseStatus::Failure;
3189
3190 if (!tryParseScalarRegister(RegNum).isSuccess())
3191 return TokError("expected register operand");
3192
3193 if (RegNum != AArch64::XZR)
3194 return TokError("xzr must be followed by xzr");
3195
3196 // We need to push something, since we claim this is an operand in .td.
3197 // See also AArch64AsmParser::parseKeywordOperand.
3198 Operands.push_back(AArch64Operand::CreateReg(
3199 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3200
3201 return ParseStatus::Success;
3202}
3203
3204/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3205ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3206 SMLoc S = getLoc();
3207 const AsmToken &Tok = getTok();
3208 if (Tok.isNot(AsmToken::Identifier))
3209 return TokError("invalid operand for instruction");
3210
3211 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3212 if (!BTI)
3213 return TokError("invalid operand for instruction");
3214
3215 Operands.push_back(AArch64Operand::CreateBTIHint(
3216 BTI->Encoding, Tok.getString(), S, getContext()));
3217 Lex(); // Eat identifier token.
3218 return ParseStatus::Success;
3219}
3220
3221/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3222/// instruction.
3223ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3224 SMLoc S = getLoc();
3225 const MCExpr *Expr = nullptr;
3226
3227 if (getTok().is(AsmToken::Hash)) {
3228 Lex(); // Eat hash token.
3229 }
3230
3231 if (parseSymbolicImmVal(Expr))
3232 return ParseStatus::Failure;
3233
3234 AArch64MCExpr::VariantKind ELFRefKind;
3235 MCSymbolRefExpr::VariantKind DarwinRefKind;
3236 int64_t Addend;
3237 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3238 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3239 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3240 // No modifier was specified at all; this is the syntax for an ELF basic
3241 // ADRP relocation (unfortunately).
3242 Expr =
3244 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3245 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3246 Addend != 0) {
3247 return Error(S, "gotpage label reference not allowed an addend");
3248 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3249 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3250 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3251 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3252 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3253 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3254 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3255 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3256 // The operand must be an @page or @gotpage qualified symbolref.
3257 return Error(S, "page or gotpage label reference expected");
3258 }
3259 }
3260
3261 // We have either a label reference possibly with addend or an immediate. The
3262 // addend is a raw value here. The linker will adjust it to only reference the
3263 // page.
3264 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3265 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3266
3267 return ParseStatus::Success;
3268}
3269
3270/// tryParseAdrLabel - Parse and validate a source label for the ADR
3271/// instruction.
3272ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3273 SMLoc S = getLoc();
3274 const MCExpr *Expr = nullptr;
3275
3276 // Leave anything with a bracket to the default for SVE
3277 if (getTok().is(AsmToken::LBrac))
3278 return ParseStatus::NoMatch;
3279
3280 if (getTok().is(AsmToken::Hash))
3281 Lex(); // Eat hash token.
3282
3283 if (parseSymbolicImmVal(Expr))
3284 return ParseStatus::Failure;
3285
3286 AArch64MCExpr::VariantKind ELFRefKind;
3287 MCSymbolRefExpr::VariantKind DarwinRefKind;
3288 int64_t Addend;
3289 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3290 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3291 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3292 // No modifier was specified at all; this is the syntax for an ELF basic
3293 // ADR relocation (unfortunately).
3294 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3295 } else {
3296 return Error(S, "unexpected adr label");
3297 }
3298 }
3299
3300 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3301 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3302 return ParseStatus::Success;
3303}
3304
3305/// tryParseFPImm - A floating point immediate expression operand.
3306template <bool AddFPZeroAsLiteral>
3307ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3308 SMLoc S = getLoc();
3309
3310 bool Hash = parseOptionalToken(AsmToken::Hash);
3311
3312 // Handle negation, as that still comes through as a separate token.
3313 bool isNegative = parseOptionalToken(AsmToken::Minus);
3314
3315 const AsmToken &Tok = getTok();
3316 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3317 if (!Hash)
3318 return ParseStatus::NoMatch;
3319 return TokError("invalid floating point immediate");
3320 }
3321
3322 // Parse hexadecimal representation.
3323 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3324 if (Tok.getIntVal() > 255 || isNegative)
3325 return TokError("encoded floating point value out of range");
3326
3328 Operands.push_back(
3329 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3330 } else {
3331 // Parse FP representation.
3332 APFloat RealVal(APFloat::IEEEdouble());
3333 auto StatusOrErr =
3334 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3335 if (errorToBool(StatusOrErr.takeError()))
3336 return TokError("invalid floating point representation");
3337
3338 if (isNegative)
3339 RealVal.changeSign();
3340
3341 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3342 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3343 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3344 } else
3345 Operands.push_back(AArch64Operand::CreateFPImm(
3346 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3347 }
3348
3349 Lex(); // Eat the token.
3350
3351 return ParseStatus::Success;
3352}
3353
3354/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3355/// a shift suffix, for example '#1, lsl #12'.
3357AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3358 SMLoc S = getLoc();
3359
3360 if (getTok().is(AsmToken::Hash))
3361 Lex(); // Eat '#'
3362 else if (getTok().isNot(AsmToken::Integer))
3363 // Operand should start from # or should be integer, emit error otherwise.
3364 return ParseStatus::NoMatch;
3365
3366 if (getTok().is(AsmToken::Integer) &&
3367 getLexer().peekTok().is(AsmToken::Colon))
3368 return tryParseImmRange(Operands);
3369
3370 const MCExpr *Imm = nullptr;
3371 if (parseSymbolicImmVal(Imm))
3372 return ParseStatus::Failure;
3373 else if (getTok().isNot(AsmToken::Comma)) {
3374 Operands.push_back(
3375 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3376 return ParseStatus::Success;
3377 }
3378
3379 // Eat ','
3380 Lex();
3381 StringRef VecGroup;
3382 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3383 Operands.push_back(
3384 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3385 Operands.push_back(
3386 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3387 return ParseStatus::Success;
3388 }
3389
3390 // The optional operand must be "lsl #N" where N is non-negative.
3391 if (!getTok().is(AsmToken::Identifier) ||
3392 !getTok().getIdentifier().equals_insensitive("lsl"))
3393 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3394
3395 // Eat 'lsl'
3396 Lex();
3397
3398 parseOptionalToken(AsmToken::Hash);
3399
3400 if (getTok().isNot(AsmToken::Integer))
3401 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3402
3403 int64_t ShiftAmount = getTok().getIntVal();
3404
3405 if (ShiftAmount < 0)
3406 return Error(getLoc(), "positive shift amount required");
3407 Lex(); // Eat the number
3408
3409 // Just in case the optional lsl #0 is used for immediates other than zero.
3410 if (ShiftAmount == 0 && Imm != nullptr) {
3411 Operands.push_back(
3412 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3413 return ParseStatus::Success;
3414 }
3415
3416 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3417 getLoc(), getContext()));
3418 return ParseStatus::Success;
3419}
3420
3421/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3422/// suggestion to help common typos.
3424AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3426 .Case("eq", AArch64CC::EQ)
3427 .Case("ne", AArch64CC::NE)
3428 .Case("cs", AArch64CC::HS)
3429 .Case("hs", AArch64CC::HS)
3430 .Case("cc", AArch64CC::LO)
3431 .Case("lo", AArch64CC::LO)
3432 .Case("mi", AArch64CC::MI)
3433 .Case("pl", AArch64CC::PL)
3434 .Case("vs", AArch64CC::VS)
3435 .Case("vc", AArch64CC::VC)
3436 .Case("hi", AArch64CC::HI)
3437 .Case("ls", AArch64CC::LS)
3438 .Case("ge", AArch64CC::GE)
3439 .Case("lt", AArch64CC::LT)
3440 .Case("gt", AArch64CC::GT)
3441 .Case("le", AArch64CC::LE)
3442 .Case("al", AArch64CC::AL)
3443 .Case("nv", AArch64CC::NV)
3445
3446 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3448 .Case("none", AArch64CC::EQ)
3449 .Case("any", AArch64CC::NE)
3450 .Case("nlast", AArch64CC::HS)
3451 .Case("last", AArch64CC::LO)
3452 .Case("first", AArch64CC::MI)
3453 .Case("nfrst", AArch64CC::PL)
3454 .Case("pmore", AArch64CC::HI)
3455 .Case("plast", AArch64CC::LS)
3456 .Case("tcont", AArch64CC::GE)
3457 .Case("tstop", AArch64CC::LT)
3459
3460 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3461 Suggestion = "nfrst";
3462 }
3463 return CC;
3464}
3465
3466/// parseCondCode - Parse a Condition Code operand.
3467bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3468 bool invertCondCode) {
3469 SMLoc S = getLoc();
3470 const AsmToken &Tok = getTok();
3471 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3472
3473 StringRef Cond = Tok.getString();
3474 std::string Suggestion;
3475 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3476 if (CC == AArch64CC::Invalid) {
3477 std::string Msg = "invalid condition code";
3478 if (!Suggestion.empty())
3479 Msg += ", did you mean " + Suggestion + "?";
3480 return TokError(Msg);
3481 }
3482 Lex(); // Eat identifier token.
3483
3484 if (invertCondCode) {
3485 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3486 return TokError("condition codes AL and NV are invalid for this instruction");
3488 }
3489
3490 Operands.push_back(
3491 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3492 return false;
3493}
3494
3495ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3496 const AsmToken &Tok = getTok();
3497 SMLoc S = getLoc();
3498
3499 if (Tok.isNot(AsmToken::Identifier))
3500 return TokError("invalid operand for instruction");
3501
3502 unsigned PStateImm = -1;
3503 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3504 if (!SVCR)
3505 return ParseStatus::NoMatch;
3506 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3507 PStateImm = SVCR->Encoding;
3508
3509 Operands.push_back(
3510 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3511 Lex(); // Eat identifier token.
3512 return ParseStatus::Success;
3513}
3514
3515ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3516 const AsmToken &Tok = getTok();
3517 SMLoc S = getLoc();
3518
3519 StringRef Name = Tok.getString();
3520
3521 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3522 Lex(); // eat "za[.(b|h|s|d)]"
3523 unsigned ElementWidth = 0;
3524 auto DotPosition = Name.find('.');
3525 if (DotPosition != StringRef::npos) {
3526 const auto &KindRes =
3527 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3528 if (!KindRes)
3529 return TokError(
3530 "Expected the register to be followed by element width suffix");
3531 ElementWidth = KindRes->second;
3532 }
3533 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3534 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3535 getContext()));
3536 if (getLexer().is(AsmToken::LBrac)) {
3537 // There's no comma after matrix operand, so we can parse the next operand
3538 // immediately.
3539 if (parseOperand(Operands, false, false))
3540 return ParseStatus::NoMatch;
3541 }
3542 return ParseStatus::Success;
3543 }
3544
3545 // Try to parse matrix register.
3546 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3547 if (!Reg)
3548 return ParseStatus::NoMatch;
3549
3550 size_t DotPosition = Name.find('.');
3551 assert(DotPosition != StringRef::npos && "Unexpected register");
3552
3553 StringRef Head = Name.take_front(DotPosition);
3554 StringRef Tail = Name.drop_front(DotPosition);
3555 StringRef RowOrColumn = Head.take_back();
3556
3557 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3558 .Case("h", MatrixKind::Row)
3559 .Case("v", MatrixKind::Col)
3560 .Default(MatrixKind::Tile);
3561
3562 // Next up, parsing the suffix
3563 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3564 if (!KindRes)
3565 return TokError(
3566 "Expected the register to be followed by element width suffix");
3567 unsigned ElementWidth = KindRes->second;
3568
3569 Lex();
3570
3571 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3572 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3573
3574 if (getLexer().is(AsmToken::LBrac)) {
3575 // There's no comma after matrix operand, so we can parse the next operand
3576 // immediately.
3577 if (parseOperand(Operands, false, false))
3578 return ParseStatus::NoMatch;
3579 }
3580 return ParseStatus::Success;
3581}
3582
3583/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3584/// them if present.
3586AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3587 const AsmToken &Tok = getTok();
3588 std::string LowerID = Tok.getString().lower();
3591 .Case("lsl", AArch64_AM::LSL)
3592 .Case("lsr", AArch64_AM::LSR)
3593 .Case("asr", AArch64_AM::ASR)
3594 .Case("ror", AArch64_AM::ROR)
3595 .Case("msl", AArch64_AM::MSL)
3596 .Case("uxtb", AArch64_AM::UXTB)
3597 .Case("uxth", AArch64_AM::UXTH)
3598 .Case("uxtw", AArch64_AM::UXTW)
3599 .Case("uxtx", AArch64_AM::UXTX)
3600 .Case("sxtb", AArch64_AM::SXTB)
3601 .Case("sxth", AArch64_AM::SXTH)
3602 .Case("sxtw", AArch64_AM::SXTW)
3603 .Case("sxtx", AArch64_AM::SXTX)
3605
3607 return ParseStatus::NoMatch;
3608
3609 SMLoc S = Tok.getLoc();
3610 Lex();
3611
3612 bool Hash = parseOptionalToken(AsmToken::Hash);
3613
3614 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3615 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3616 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3617 ShOp == AArch64_AM::MSL) {
3618 // We expect a number here.
3619 return TokError("expected #imm after shift specifier");
3620 }
3621
3622 // "extend" type operations don't need an immediate, #0 is implicit.
3623 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3624 Operands.push_back(
3625 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3626 return ParseStatus::Success;
3627 }
3628
3629 // Make sure we do actually have a number, identifier or a parenthesized
3630 // expression.
3631 SMLoc E = getLoc();
3632 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3633 !getTok().is(AsmToken::Identifier))
3634 return Error(E, "expected integer shift amount");
3635
3636 const MCExpr *ImmVal;
3637 if (getParser().parseExpression(ImmVal))
3638 return ParseStatus::Failure;
3639
3640 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3641 if (!MCE)
3642 return Error(E, "expected constant '#imm' after shift specifier");
3643
3644 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3645 Operands.push_back(AArch64Operand::CreateShiftExtend(
3646 ShOp, MCE->getValue(), true, S, E, getContext()));
3647 return ParseStatus::Success;
3648}
3649
3650static const struct Extension {
3651 const char *Name;
3653} ExtensionMap[] = {
3654 {"crc", {AArch64::FeatureCRC}},
3655 {"sm4", {AArch64::FeatureSM4}},
3656 {"sha3", {AArch64::FeatureSHA3}},
3657 {"sha2", {AArch64::FeatureSHA2}},
3658 {"aes", {AArch64::FeatureAES}},
3659 {"crypto", {AArch64::FeatureCrypto}},
3660 {"fp", {AArch64::FeatureFPARMv8}},
3661 {"simd", {AArch64::FeatureNEON}},
3662 {"ras", {AArch64::FeatureRAS}},
3663 {"rasv2", {AArch64::FeatureRASv2}},
3664 {"lse", {AArch64::FeatureLSE}},
3665 {"predres", {AArch64::FeaturePredRes}},
3666 {"predres2", {AArch64::FeatureSPECRES2}},
3667 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3668 {"mte", {AArch64::FeatureMTE}},
3669 {"memtag", {AArch64::FeatureMTE}},
3670 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3671 {"pan", {AArch64::FeaturePAN}},
3672 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3673 {"ccpp", {AArch64::FeatureCCPP}},
3674 {"rcpc", {AArch64::FeatureRCPC}},
3675 {"rng", {AArch64::FeatureRandGen}},
3676 {"sve", {AArch64::FeatureSVE}},
3677 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3678 {"sve2", {AArch64::FeatureSVE2}},
3679 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3680 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3681 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3682 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3683 {"sve2p1", {AArch64::FeatureSVE2p1}},
3684 {"ls64", {AArch64::FeatureLS64}},
3685 {"xs", {AArch64::FeatureXS}},
3686 {"pauth", {AArch64::FeaturePAuth}},
3687 {"flagm", {AArch64::FeatureFlagM}},
3688 {"rme", {AArch64::FeatureRME}},
3689 {"sme", {AArch64::FeatureSME}},
3690 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3691 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3692 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3693 {"sme2", {AArch64::FeatureSME2}},
3694 {"sme2p1", {AArch64::FeatureSME2p1}},
3695 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3696 {"hbc", {AArch64::FeatureHBC}},
3697 {"mops", {AArch64::FeatureMOPS}},
3698 {"mec", {AArch64::FeatureMEC}},
3699 {"the", {AArch64::FeatureTHE}},
3700 {"d128", {AArch64::FeatureD128}},
3701 {"lse128", {AArch64::FeatureLSE128}},
3702 {"ite", {AArch64::FeatureITE}},
3703 {"cssc", {AArch64::FeatureCSSC}},
3704 {"rcpc3", {AArch64::FeatureRCPC3}},
3705 {"gcs", {AArch64::FeatureGCS}},
3706 {"bf16", {AArch64::FeatureBF16}},
3707 {"compnum", {AArch64::FeatureComplxNum}},
3708 {"dotprod", {AArch64::FeatureDotProd}},
3709 {"f32mm", {AArch64::FeatureMatMulFP32}},
3710 {"f64mm", {AArch64::FeatureMatMulFP64}},
3711 {"fp16", {AArch64::FeatureFullFP16}},
3712 {"fp16fml", {AArch64::FeatureFP16FML}},
3713 {"i8mm", {AArch64::FeatureMatMulInt8}},
3714 {"lor", {AArch64::FeatureLOR}},
3715 {"profile", {AArch64::FeatureSPE}},
3716 // "rdma" is the name documented by binutils for the feature, but
3717 // binutils also accepts incomplete prefixes of features, so "rdm"
3718 // works too. Support both spellings here.
3719 {"rdm", {AArch64::FeatureRDM}},
3720 {"rdma", {AArch64::FeatureRDM}},
3721 {"sb", {AArch64::FeatureSB}},
3722 {"ssbs", {AArch64::FeatureSSBS}},
3723 {"tme", {AArch64::FeatureTME}},
3724 {"fp8", {AArch64::FeatureFP8}},
3725 {"faminmax", {AArch64::FeatureFAMINMAX}},
3726 {"fp8fma", {AArch64::FeatureFP8FMA}},
3727 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3728 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3729 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3730 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3731 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3732 {"lut", {AArch64::FeatureLUT}},
3733 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3734 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3735 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3736 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3737 {"cpa", {AArch64::FeatureCPA}},
3738 {"tlbiw", {AArch64::FeatureTLBIW}},
3740
3741static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3742 if (FBS[AArch64::HasV8_0aOps])
3743 Str += "ARMv8a";
3744 if (FBS[AArch64::HasV8_1aOps])
3745 Str += "ARMv8.1a";
3746 else if (FBS[AArch64::HasV8_2aOps])
3747 Str += "ARMv8.2a";
3748 else if (FBS[AArch64::HasV8_3aOps])
3749 Str += "ARMv8.3a";
3750 else if (FBS[AArch64::HasV8_4aOps])
3751 Str += "ARMv8.4a";
3752 else if (FBS[AArch64::HasV8_5aOps])
3753 Str += "ARMv8.5a";
3754 else if (FBS[AArch64::HasV8_6aOps])
3755 Str += "ARMv8.6a";
3756 else if (FBS[AArch64::HasV8_7aOps])
3757 Str += "ARMv8.7a";
3758 else if (FBS[AArch64::HasV8_8aOps])
3759 Str += "ARMv8.8a";
3760 else if (FBS[AArch64::HasV8_9aOps])
3761 Str += "ARMv8.9a";
3762 else if (FBS[AArch64::HasV9_0aOps])
3763 Str += "ARMv9-a";
3764 else if (FBS[AArch64::HasV9_1aOps])
3765 Str += "ARMv9.1a";
3766 else if (FBS[AArch64::HasV9_2aOps])
3767 Str += "ARMv9.2a";
3768 else if (FBS[AArch64::HasV9_3aOps])
3769 Str += "ARMv9.3a";
3770 else if (FBS[AArch64::HasV9_4aOps])
3771 Str += "ARMv9.4a";
3772 else if (FBS[AArch64::HasV9_5aOps])
3773 Str += "ARMv9.5a";
3774 else if (FBS[AArch64::HasV8_0rOps])
3775 Str += "ARMv8r";
3776 else {
3777 SmallVector<std::string, 2> ExtMatches;
3778 for (const auto& Ext : ExtensionMap) {
3779 // Use & in case multiple features are enabled
3780 if ((FBS & Ext.Features) != FeatureBitset())
3781 ExtMatches.push_back(Ext.Name);
3782 }
3783 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3784 }
3785}
3786
3787void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3788 SMLoc S) {
3789 const uint16_t Op2 = Encoding & 7;
3790 const uint16_t Cm = (Encoding & 0x78) >> 3;
3791 const uint16_t Cn = (Encoding & 0x780) >> 7;
3792 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3793
3794 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3795
3796 Operands.push_back(
3797 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3798 Operands.push_back(
3799 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3800 Operands.push_back(
3801 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3802 Expr = MCConstantExpr::create(Op2, getContext());
3803 Operands.push_back(
3804 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3805}
3806
3807/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3808/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3809bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3811 if (Name.contains('.'))
3812 return TokError("invalid operand");
3813
3814 Mnemonic = Name;
3815 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3816
3817 const AsmToken &Tok = getTok();
3818 StringRef Op = Tok.getString();
3819 SMLoc S = Tok.getLoc();
3820
3821 if (Mnemonic == "ic") {
3822 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3823 if (!IC)
3824 return TokError("invalid operand for IC instruction");
3825 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3826 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3828 return TokError(Str);
3829 }
3830 createSysAlias(IC->Encoding, Operands, S);
3831 } else if (Mnemonic == "dc") {
3832 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3833 if (!DC)
3834 return TokError("invalid operand for DC instruction");
3835 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3836 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3838 return TokError(Str);
3839 }
3840 createSysAlias(DC->Encoding, Operands, S);
3841 } else if (Mnemonic == "at") {
3842 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3843 if (!AT)
3844 return TokError("invalid operand for AT instruction");
3845 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3846 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3848 return TokError(Str);
3849 }
3850 createSysAlias(AT->Encoding, Operands, S);
3851 } else if (Mnemonic == "tlbi") {
3852 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3853 if (!TLBI)
3854 return TokError("invalid operand for TLBI instruction");
3855 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3856 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3858 return TokError(Str);
3859 }
3860 createSysAlias(TLBI->Encoding, Operands, S);
3861 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3862
3863 if (Op.lower() != "rctx")
3864 return TokError("invalid operand for prediction restriction instruction");
3865
3866 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3867 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3868 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3869
3870 if (Mnemonic == "cosp" && !hasSpecres2)
3871 return TokError("COSP requires: predres2");
3872 if (!hasPredres)
3873 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3874
3875 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3876 : Mnemonic == "dvp" ? 0b101
3877 : Mnemonic == "cosp" ? 0b110
3878 : Mnemonic == "cpp" ? 0b111
3879 : 0;
3880 assert(PRCTX_Op2 &&
3881 "Invalid mnemonic for prediction restriction instruction");
3882 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3883 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3884
3885 createSysAlias(Encoding, Operands, S);
3886 }
3887
3888 Lex(); // Eat operand.
3889
3890 bool ExpectRegister = !Op.contains_insensitive("all");
3891 bool HasRegister = false;
3892
3893 // Check for the optional register operand.
3894 if (parseOptionalToken(AsmToken::Comma)) {
3895 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3896 return TokError("expected register operand");
3897 HasRegister = true;
3898 }
3899
3900 if (ExpectRegister && !HasRegister)
3901 return TokError("specified " + Mnemonic + " op requires a register");
3902 else if (!ExpectRegister && HasRegister)
3903 return TokError("specified " + Mnemonic + " op does not use a register");
3904
3905 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3906 return true;
3907
3908 return false;
3909}
3910
3911/// parseSyspAlias - The TLBIP instructions are simple aliases for
3912/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3913bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3915 if (Name.contains('.'))
3916 return TokError("invalid operand");
3917
3918 Mnemonic = Name;
3919 Operands.push_back(
3920 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3921
3922 const AsmToken &Tok = getTok();
3923 StringRef Op = Tok.getString();
3924 SMLoc S = Tok.getLoc();
3925
3926 if (Mnemonic == "tlbip") {
3927 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3928 if (HasnXSQualifier) {
3929 Op = Op.drop_back(3);
3930 }
3931 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3932 if (!TLBIorig)
3933 return TokError("invalid operand for TLBIP instruction");
3934 const AArch64TLBI::TLBI TLBI(
3935 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3936 TLBIorig->NeedsReg,
3937 HasnXSQualifier
3938 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3939 : TLBIorig->FeaturesRequired);
3940 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3941 std::string Name =
3942 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3943 std::string Str("TLBIP " + Name + " requires: ");
3945 return TokError(Str);
3946 }
3947 createSysAlias(TLBI.Encoding, Operands, S);
3948 }
3949
3950 Lex(); // Eat operand.
3951
3952 if (parseComma())
3953 return true;
3954
3955 if (Tok.isNot(AsmToken::Identifier))
3956 return TokError("expected register identifier");
3957 auto Result = tryParseSyspXzrPair(Operands);
3958 if (Result.isNoMatch())
3959 Result = tryParseGPRSeqPair(Operands);
3960 if (!Result.isSuccess())
3961 return TokError("specified " + Mnemonic +
3962 " op requires a pair of registers");
3963
3964 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3965 return true;
3966
3967 return false;
3968}
3969
/// tryParseBarrierOperand - Parse the operand of DMB/DSB/ISB/TSB.
/// Accepts either an (optionally '#'-prefixed) constant immediate in
/// [0, 15] or a named barrier option. For "dsb", out-of-range immediates
/// and unknown names return NoMatch so the nXS variant can be tried next.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  // TSB takes only the named option 'csync', never an immediate.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
    return TokError("'csync' operand expected");
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS path.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(ExprLoc, "barrier operand out of range");
    // Attach the canonical option name, if the encoding has one.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError("invalid barrier option name");
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4031
4033AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4034 const AsmToken &Tok = getTok();
4035
4036 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4037 if (Mnemonic != "dsb")
4038 return ParseStatus::Failure;
4039
4040 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4041 // Immediate operand.
4042 const MCExpr *ImmVal;
4043 SMLoc ExprLoc = getLoc();
4044 if (getParser().parseExpression(ImmVal))
4045 return ParseStatus::Failure;
4046 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4047 if (!MCE)
4048 return Error(ExprLoc, "immediate value expected for barrier operand");
4049 int64_t Value = MCE->getValue();
4050 // v8.7-A DSB in the nXS variant accepts only the following immediate
4051 // values: 16, 20, 24, 28.
4052 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4053 return Error(ExprLoc, "barrier operand out of range");
4054 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4055 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4056 ExprLoc, getContext(),
4057 true /*hasnXSModifier*/));
4058 return ParseStatus::Success;
4059 }
4060
4061 if (Tok.isNot(AsmToken::Identifier))
4062 return TokError("invalid operand for instruction");
4063
4064 StringRef Operand = Tok.getString();
4065 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4066
4067 if (!DB)
4068 return TokError("invalid barrier option name");
4069
4070 Operands.push_back(
4071 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4072 getContext(), true /*hasnXSModifier*/));
4073 Lex(); // Consume the option
4074
4075 return ParseStatus::Success;
4076}
4077
4078ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4079 const AsmToken &Tok = getTok();
4080
4081 if (Tok.isNot(AsmToken::Identifier))
4082 return ParseStatus::NoMatch;
4083
4084 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4085 return ParseStatus::NoMatch;
4086
4087 int MRSReg, MSRReg;
4088 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4089 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4090 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4091 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4092 } else
4093 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4094
4095 unsigned PStateImm = -1;
4096 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4097 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4098 PStateImm = PState15->Encoding;
4099 if (!PState15) {
4100 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4101 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4102 PStateImm = PState1->Encoding;
4103 }
4104
4105 Operands.push_back(
4106 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4107 PStateImm, getContext()));
4108 Lex(); // Eat identifier
4109
4110 return ParseStatus::Success;
4111}
4112
4113/// tryParseNeonVectorRegister - Parse a vector register operand.
4114bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4115 if (getTok().isNot(AsmToken::Identifier))
4116 return true;
4117
4118 SMLoc S = getLoc();
4119 // Check for a vector register specifier first.
4122 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4123 if (!Res.isSuccess())
4124 return true;
4125
4126 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4127 if (!KindRes)
4128 return true;
4129
4130 unsigned ElementWidth = KindRes->second;
4131 Operands.push_back(
4132 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4133 S, getLoc(), getContext()));
4134
4135 // If there was an explicit qualifier, that goes on as a literal text
4136 // operand.
4137 if (!Kind.empty())
4138 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4139
4140 return tryParseVectorIndex(Operands).isFailure();
4141}
4142
4143ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4144 SMLoc SIdx = getLoc();
4145 if (parseOptionalToken(AsmToken::LBrac)) {
4146 const MCExpr *ImmVal;
4147 if (getParser().parseExpression(ImmVal))
4148 return ParseStatus::NoMatch;
4149 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4150 if (!MCE)
4151 return TokError("immediate value expected for vector index");
4152
4153 SMLoc E = getLoc();
4154
4155 if (parseToken(AsmToken::RBrac, "']' expected"))
4156 return ParseStatus::Failure;
4157
4158 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4159 E, getContext()));
4160 return ParseStatus::Success;
4161 }
4162
4163 return ParseStatus::NoMatch;
4164}
4165
4166// tryParseVectorRegister - Try to parse a vector register name with
4167// optional kind specifier. If it is a register specifier, eat the token
4168// and return it.
4169ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4170 StringRef &Kind,
4171 RegKind MatchKind) {
4172 const AsmToken &Tok = getTok();
4173
4174 if (Tok.isNot(AsmToken::Identifier))
4175 return ParseStatus::NoMatch;
4176
4177 StringRef Name = Tok.getString();
4178 // If there is a kind specifier, it's separated from the register name by
4179 // a '.'.
4180 size_t Start = 0, Next = Name.find('.');
4181 StringRef Head = Name.slice(Start, Next);
4182 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4183
4184 if (RegNum) {
4185 if (Next != StringRef::npos) {
4186 Kind = Name.slice(Next, StringRef::npos);
4187 if (!isValidVectorKind(Kind, MatchKind))
4188 return TokError("invalid vector kind qualifier");
4189 }
4190 Lex(); // Eat the register token.
4191
4192 Reg = RegNum;
4193 return ParseStatus::Success;
4194 }
4195
4196 return ParseStatus::NoMatch;
4197}
4198
4199ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4202 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4203 if (!Status.isSuccess())
4204 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4205 return Status;
4206}
4207
4208/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4209template <RegKind RK>
4211AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4212 // Check for a SVE predicate register specifier first.
4213 const SMLoc S = getLoc();
4215 MCRegister RegNum;
4216 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4217 if (!Res.isSuccess())
4218 return Res;
4219
4220 const auto &KindRes = parseVectorKind(Kind, RK);
4221 if (!KindRes)
4222 return ParseStatus::NoMatch;
4223
4224 unsigned ElementWidth = KindRes->second;
4225 Operands.push_back(AArch64Operand::CreateVectorReg(
4226 RegNum, RK, ElementWidth, S,
4227 getLoc(), getContext()));
4228
4229 if (getLexer().is(AsmToken::LBrac)) {
4230 if (RK == RegKind::SVEPredicateAsCounter) {
4231 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4232 if (ResIndex.isSuccess())
4233 return ParseStatus::Success;
4234 } else {
4235 // Indexed predicate, there's no comma so try parse the next operand
4236 // immediately.
4237 if (parseOperand(Operands, false, false))
4238 return ParseStatus::NoMatch;
4239 }
4240 }
4241
4242 // Not all predicates are followed by a '/m' or '/z'.
4243 if (getTok().isNot(AsmToken::Slash))
4244 return ParseStatus::Success;
4245
4246 // But when they do they shouldn't have an element type suffix.
4247 if (!Kind.empty())
4248 return Error(S, "not expecting size suffix");
4249
4250 // Add a literal slash as operand
4251 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4252
4253 Lex(); // Eat the slash.
4254
4255 // Zeroing or merging?
4256 auto Pred = getTok().getString().lower();
4257 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4258 return Error(getLoc(), "expecting 'z' predication");
4259
4260 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4261 return Error(getLoc(), "expecting 'm' or 'z' predication");
4262
4263 // Add zero/merge token.
4264 const char *ZM = Pred == "z" ? "z" : "m";
4265 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4266
4267 Lex(); // Eat zero/merge token.
4268 return ParseStatus::Success;
4269}
4270
4271/// parseRegister - Parse a register operand.
4272bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4273 // Try for a Neon vector register.
4274 if (!tryParseNeonVectorRegister(Operands))
4275 return false;
4276
4277 if (tryParseZTOperand(Operands).isSuccess())
4278 return false;
4279
4280 // Otherwise try for a scalar register.
4281 if (tryParseGPROperand<false>(Operands).isSuccess())
4282 return false;
4283
4284 return true;
4285}
4286
4287bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4288 bool HasELFModifier = false;
4290
4291 if (parseOptionalToken(AsmToken::Colon)) {
4292 HasELFModifier = true;
4293
4294 if (getTok().isNot(AsmToken::Identifier))
4295 return TokError("expect relocation specifier in operand after ':'");
4296
4297 std::string LowerCase = getTok().getIdentifier().lower();
4298 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4300 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4301 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4302 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4303 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4304 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4305 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4306 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4307 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4308 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4309 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4310 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4311 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4312 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4313 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4314 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4315 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4316 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4317 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4318 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4319 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4320 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4321 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4322 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4323 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4324 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4325 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4326 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4327 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4328 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4329 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4330 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4331 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4332 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4333 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4334 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4336 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4337 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4339 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4340 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4341 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4343 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4344 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4346
4347 if (RefKind == AArch64MCExpr::VK_INVALID)
4348 return TokError("expect relocation specifier in operand after ':'");
4349
4350 Lex(); // Eat identifier
4351
4352 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4353 return true;
4354 }
4355
4356 if (getParser().parseExpression(ImmVal))
4357 return true;
4358
4359 if (HasELFModifier)
4360 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4361
4362 return false;
4363}
4364
4365ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4366 if (getTok().isNot(AsmToken::LCurly))
4367 return ParseStatus::NoMatch;
4368
4369 auto ParseMatrixTile = [this](unsigned &Reg,
4370 unsigned &ElementWidth) -> ParseStatus {
4371 StringRef Name = getTok().getString();
4372 size_t DotPosition = Name.find('.');
4373 if (DotPosition == StringRef::npos)
4374 return ParseStatus::NoMatch;
4375
4376 unsigned RegNum = matchMatrixTileListRegName(Name);
4377 if (!RegNum)
4378 return ParseStatus::NoMatch;
4379
4380 StringRef Tail = Name.drop_front(DotPosition);
4381 const std::optional<std::pair<int, int>> &KindRes =
4382 parseVectorKind(Tail, RegKind::Matrix);
4383 if (!KindRes)
4384 return TokError(
4385 "Expected the register to be followed by element width suffix");
4386 ElementWidth = KindRes->second;
4387 Reg = RegNum;
4388 Lex(); // Eat the register.
4389 return ParseStatus::Success;
4390 };
4391
4392 SMLoc S = getLoc();
4393 auto LCurly = getTok();
4394 Lex(); // Eat left bracket token.
4395
4396 // Empty matrix list
4397 if (parseOptionalToken(AsmToken::RCurly)) {
4398 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4399 /*RegMask=*/0, S, getLoc(), getContext()));
4400 return ParseStatus::Success;
4401 }
4402
4403 // Try parse {za} alias early
4404 if (getTok().getString().equals_insensitive("za")) {
4405 Lex(); // Eat 'za'
4406
4407 if (parseToken(AsmToken::RCurly, "'}' expected"))
4408 return ParseStatus::Failure;
4409
4410 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4411 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4412 return ParseStatus::Success;
4413 }
4414
4415 SMLoc TileLoc = getLoc();
4416
4417 unsigned FirstReg, ElementWidth;
4418 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4419 if (!ParseRes.isSuccess()) {
4420 getLexer().UnLex(LCurly);
4421 return ParseRes;
4422 }
4423
4424 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4425
4426 unsigned PrevReg = FirstReg;
4427
4429 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4430
4431 SmallSet<unsigned, 8> SeenRegs;
4432 SeenRegs.insert(FirstReg);
4433
4434 while (parseOptionalToken(AsmToken::Comma)) {
4435 TileLoc = getLoc();
4436 unsigned Reg, NextElementWidth;
4437 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4438 if (!ParseRes.isSuccess())
4439 return ParseRes;
4440
4441 // Element size must match on all regs in the list.
4442 if (ElementWidth != NextElementWidth)
4443 return Error(TileLoc, "mismatched register size suffix");
4444
4445 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4446 Warning(TileLoc, "tile list not in ascending order");
4447
4448 if (SeenRegs.contains(Reg))
4449 Warning(TileLoc, "duplicate tile in list");
4450 else {
4451 SeenRegs.insert(Reg);
4452 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4453 }
4454
4455 PrevReg = Reg;
4456 }
4457
4458 if (parseToken(AsmToken::RCurly, "'}' expected"))
4459 return ParseStatus::Failure;
4460
4461 unsigned RegMask = 0;
4462 for (auto Reg : DRegs)
4463 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4464 RI->getEncodingValue(AArch64::ZAD0));
4465 Operands.push_back(
4466 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4467
4468 return ParseStatus::Success;
4469}
4470
4471template <RegKind VectorKind>
4472ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4473 bool ExpectMatch) {
4474 MCAsmParser &Parser = getParser();
4475 if (!getTok().is(AsmToken::LCurly))
4476 return ParseStatus::NoMatch;
4477
4478 // Wrapper around parse function
4479 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4480 bool NoMatchIsError) -> ParseStatus {
4481 auto RegTok = getTok();
4482 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4483 if (ParseRes.isSuccess()) {
4484 if (parseVectorKind(Kind, VectorKind))
4485 return ParseRes;
4486 llvm_unreachable("Expected a valid vector kind");
4487 }
4488
4489 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4490 RegTok.getString().equals_insensitive("zt0"))
4491 return ParseStatus::NoMatch;
4492
4493 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4494 (ParseRes.isNoMatch() && NoMatchIsError &&
4495 !RegTok.getString().starts_with_insensitive("za")))
4496 return Error(Loc, "vector register expected");
4497
4498 return ParseStatus::NoMatch;
4499 };
4500
4501 int NumRegs = getNumRegsForRegKind(VectorKind);
4502 SMLoc S = getLoc();
4503 auto LCurly = getTok();
4504 Lex(); // Eat left bracket token.
4505
4507 MCRegister FirstReg;
4508 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4509
4510 // Put back the original left bracket if there was no match, so that
4511 // different types of list-operands can be matched (e.g. SVE, Neon).
4512 if (ParseRes.isNoMatch())
4513 Parser.getLexer().UnLex(LCurly);
4514
4515 if (!ParseRes.isSuccess())
4516 return ParseRes;
4517
4518 int64_t PrevReg = FirstReg;
4519 unsigned Count = 1;
4520
4521 int Stride = 1;
4522 if (parseOptionalToken(AsmToken::Minus)) {
4523 SMLoc Loc = getLoc();
4524 StringRef NextKind;
4525
4527 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4528 if (!ParseRes.isSuccess())
4529 return ParseRes;
4530
4531 // Any Kind suffices must match on all regs in the list.
4532 if (Kind != NextKind)
4533 return Error(Loc, "mismatched register size suffix");
4534
4535 unsigned Space =
4536 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4537
4538 if (Space == 0 || Space > 3)
4539 return Error(Loc, "invalid number of vectors");
4540
4541 Count += Space;
4542 }
4543 else {
4544 bool HasCalculatedStride = false;
4545 while (parseOptionalToken(AsmToken::Comma)) {
4546 SMLoc Loc = getLoc();
4547 StringRef NextKind;
4549 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4550 if (!ParseRes.isSuccess())
4551 return ParseRes;
4552
4553 // Any Kind suffices must match on all regs in the list.
4554 if (Kind != NextKind)
4555 return Error(Loc, "mismatched register size suffix");
4556
4557 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4558 unsigned PrevRegVal =
4559 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4560 if (!HasCalculatedStride) {
4561 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4562 : (RegVal + NumRegs - PrevRegVal);
4563 HasCalculatedStride = true;
4564 }
4565
4566 // Register must be incremental (with a wraparound at last register).
4567 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4568 return Error(Loc, "registers must have the same sequential stride");
4569
4570 PrevReg = Reg;
4571 ++Count;
4572 }
4573 }
4574
4575 if (parseToken(AsmToken::RCurly, "'}' expected"))
4576 return ParseStatus::Failure;
4577
4578 if (Count > 4)
4579 return Error(S, "invalid number of vectors");
4580
4581 unsigned NumElements = 0;
4582 unsigned ElementWidth = 0;
4583 if (!Kind.empty()) {
4584 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4585 std::tie(NumElements, ElementWidth) = *VK;
4586 }
4587
4588 Operands.push_back(AArch64Operand::CreateVectorList(
4589 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4590 getLoc(), getContext()));
4591
4592 return ParseStatus::Success;
4593}
4594
4595/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4596bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4597 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4598 if (!ParseRes.isSuccess())
4599 return true;
4600
4601 return tryParseVectorIndex(Operands).isFailure();
4602}
4603
4604ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4605 SMLoc StartLoc = getLoc();
4606
4607 MCRegister RegNum;
4608 ParseStatus Res = tryParseScalarRegister(RegNum);
4609 if (!Res.isSuccess())
4610 return Res;
4611
4612 if (!parseOptionalToken(AsmToken::Comma)) {
4613 Operands.push_back(AArch64Operand::CreateReg(
4614 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4615 return ParseStatus::Success;
4616 }
4617
4618 parseOptionalToken(AsmToken::Hash);
4619
4620 if (getTok().isNot(AsmToken::Integer))
4621 return Error(getLoc(), "index must be absent or #0");
4622
4623 const MCExpr *ImmVal;
4624 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4625 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4626 return Error(getLoc(), "index must be absent or #0");
4627
4628 Operands.push_back(AArch64Operand::CreateReg(
4629 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4630 return ParseStatus::Success;
4631}
4632
/// tryParseZTOperand - Parse a lookup-table (ZT) register operand, with an
/// optional "[<imm>{, mul ...}]" index suffix.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  // Register-name matching is case-insensitive.
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    // The brackets are kept as literal tokens around the immediate.
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::NoMatch;
    // Only constant indices are representable.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");
    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
    // An optional "mul ..." decoration may follow the index.
    if (parseOptionalToken(AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        AArch64Operand::CreateToken("]", getLoc(), getContext()));
  }
  return ParseStatus::Success;
}
4670
4671template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4672ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4673 SMLoc StartLoc = getLoc();
4674
4675 MCRegister RegNum;
4676 ParseStatus Res = tryParseScalarRegister(RegNum);
4677 if (!Res.isSuccess())
4678 return Res;
4679
4680 // No shift/extend is the default.
4681 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4682 Operands.push_back(AArch64Operand::CreateReg(
4683 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4684 return ParseStatus::Success;
4685 }
4686
4687 // Eat the comma
4688 Lex();
4689
4690 // Match the shift
4692 Res = tryParseOptionalShiftExtend(ExtOpnd);
4693 if (!Res.isSuccess())
4694 return Res;
4695
4696 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4697 Operands.push_back(AArch64Operand::CreateReg(
4698 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4699 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4700 Ext->hasShiftExtendAmount()));
4701
4702 return ParseStatus::Success;
4703}
4704
/// Parse an optional "mul vl" or "mul #<imm>" decoration following an SVE
/// immediate. Returns false only when a complete, well-formed decoration was
/// consumed; true when there is nothing to parse (lexer untouched), and an
/// Error when "mul" is present but malformed.
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  // Peek past "mul" before consuming anything, so a bare identifier that is
  // not part of a decoration leaves the lexer untouched.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive("vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
  if (!getTok().getString().equals_insensitive("mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
            getContext()));
        return false;
      }
  }

  // "mul" was consumed but no valid "vl" / constant immediate followed.
  return Error(getLoc(), "expected 'vl' or '#<imm>'");
}
4746
4747bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4748 StringRef &VecGroup) {
4749 MCAsmParser &Parser = getParser();
4750 auto Tok = Parser.getTok();
4751 if (Tok.isNot(AsmToken::Identifier))
4752 return true;
4753
4755 .Case("vgx2", "vgx2")
4756 .Case("vgx4", "vgx4")
4757 .Default("");
4758
4759 if (VG.empty())
4760 return true;
4761
4762 VecGroup = VG;
4763 Parser.Lex(); // Eat vgx[2|4]
4764 return false;
4765}
4766
4767bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4768 auto Tok = getTok();
4769 if (Tok.isNot(AsmToken::Identifier))
4770 return true;
4771
4772 auto Keyword = Tok.getString();
4774 .Case("sm", "sm")
4775 .Case("za", "za")
4776 .Default(Keyword);
4777 Operands.push_back(
4778 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4779
4780 Lex();
4781 return false;
4782}
4783
/// parseOperand - Parse a single AArch64 instruction operand. For now this
/// parses the operand regardless of the mnemonic.
///
/// \param Operands       vector the parsed operand(s) are appended to.
/// \param isCondCode     true when a condition-code operand is expected at
///                       this position (e.g. the final operand of ccmp).
/// \param invertCondCode true when the parsed condition code must be
///                       inverted (aliases such as cset/cinc).
/// \returns true on failure (a diagnostic has been emitted), false on
///          success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: after an operand, consume an optional ", <shift|extend>". If the
  // text following the comma is not a shift/extend, the comma is pushed back
  // (via SavedTok) so later parsing sees it again.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a symbolic immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        getLexer().UnLex(SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching what the matcher
      // expects for these comparisons.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize the constant into a 16-bit payload plus an LSL #16*k shift,
      // the form a single movz can encode.
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4996
4997bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4998 const MCExpr *Expr = nullptr;
4999 SMLoc L = getLoc();
5000 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5001 return true;
5002 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5003 if (check(!Value, L, "expected constant expression"))
5004 return true;
5005 Out = Value->getValue();
5006 return false;
5007}
5008
5009bool AArch64AsmParser::parseComma() {
5010 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5011 return true;
5012 // Eat the comma
5013 Lex();
5014 return false;
5015}
5016
/// Parse a register and check it lies within [First, Last], writing its
/// zero-based offset from \p Base to \p Out. For GPR ranges based at x0,
/// FP and LR are accepted as x29/x30 even though they are not laid out
/// linearly after x28 in the register enum.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  // NOTE(review): the declaration of 'Reg' (presumably an MCRegister)
  // appears truncated in this copy of the file -- verify against upstream.
  SMLoc Start, End;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  // NOTE(review): the continuation of this diagnostic (the range's register
  // names) appears truncated in this copy of the file.
  if (check(Reg < First || Reg > RangeEnd, Start,
            Twine("expected register in range ") +
    return true;
  Out = Reg - Base;
  return false;
}
5055
5056bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5057 const MCParsedAsmOperand &Op2) const {
5058 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5059 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5060
5061 if (AOp1.isVectorList() && AOp2.isVectorList())
5062 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5063 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5064 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5065
5066 if (!AOp1.isReg() || !AOp2.isReg())
5067 return false;
5068
5069 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5070 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5071 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5072
5073 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5074 "Testing equality of non-scalar registers not supported");
5075
5076 // Check if a registers match their sub/super register classes.
5077 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5078 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5079 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5080 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5081 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5082 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5083 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5084 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5085
5086 return false;
5087}
5088
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Also handles the AArch64-specific .req directive, sys/sysp
/// aliases, splitting of dotted mnemonics, and condition-code suffixes.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
  // NOTE(review): the remainder of this signature (the Operands parameter)
  // and the head of the StringSwitch that canonicalizes the "b<cond>"
  // branch aliases below appear truncated in this copy of the file --
  // verify against upstream.
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
    return parseSysAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5237
5238static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5239 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5240 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5241 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5242 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5243 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5244 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5245 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5246}
5247
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Validates a matched MCInst before emission: movprfx-prefix rules,
// ARM64EC register restrictions, unpredictable writeback/pair register
// combinations, MOPS register constraints, and add/sub immediate
// relocation kinds. Returns true (after emitting a diagnostic) when the
// instruction is rejected.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
  // NOTE(review): the remainder of this signature (the operand-location
  // list parameter referenced below as 'Loc') appears truncated in this
  // copy of the file -- verify against upstream.
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    // NOTE(review): the guarding condition for this diagnostic appears
    // truncated in this copy of the file -- verify against upstream.
      return Error(IDLoc, "instruction is unpredictable when following a"
                   " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                   " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                     " movprfx and destination also used as non-destructive"
                     " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instruction's general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      // NOTE(review): the second half of this condition appears truncated in
      // this copy of the file -- verify against upstream.
      if (PgIdx == -1 ||
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // On ARM64EC, only valid registers may be used. Warn against using
  // explicitly disallowed registers.
  if (IsWindowsArm64EC) {
    for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg()) {
        unsigned Reg = Inst.getOperand(i).getReg();
        // At this point, vector registers are matched to their
        // appropriately sized alias.
        if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
            (Reg == AArch64::W14 || Reg == AArch64::X14) ||
            (Reg == AArch64::W23 || Reg == AArch64::X23) ||
            (Reg == AArch64::W24 || Reg == AArch64::X24) ||
            (Reg == AArch64::W28 || Reg == AArch64::X28) ||
            (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
            (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
            (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
            (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
            (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
          Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
                             " is disallowed on ARM64EC.");
        }
      }
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    [[fallthrough]];
  }
  case AArch64::LDR_ZA:
  case AArch64::STR_ZA: {
    if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
        Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
      return Error(Loc[1],
                   "unpredictable instruction, immediate and offset mismatch.");
    break;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }

  // Check v8.8-A memops instructions.
  switch (Inst.getOpcode()) {
  case AArch64::CPYFP:
  case AArch64::CPYFPWN:
  case AArch64::CPYFPRN:
  case AArch64::CPYFPN:
  case AArch64::CPYFPWT:
  case AArch64::CPYFPWTWN:
  case AArch64::CPYFPWTRN:
  case AArch64::CPYFPWTN:
  case AArch64::CPYFPRT:
  case AArch64::CPYFPRTWN:
  case AArch64::CPYFPRTRN:
  case AArch64::CPYFPRTN:
  case AArch64::CPYFPT:
  case AArch64::CPYFPTWN:
  case AArch64::CPYFPTRN:
  case AArch64::CPYFPTN:
  case AArch64::CPYFM:
  case AArch64::CPYFMWN:
  case AArch64::CPYFMRN:
  case AArch64::CPYFMN:
  case AArch64::CPYFMWT:
  case AArch64::CPYFMWTWN:
  case AArch64::CPYFMWTRN:
  case AArch64::CPYFMWTN:
  case AArch64::CPYFMRT:
  case AArch64::CPYFMRTWN:
  case AArch64::CPYFMRTRN:
  case AArch64::CPYFMRTN:
  case AArch64::CPYFMT:
  case AArch64::CPYFMTWN:
  case AArch64::CPYFMTRN:
  case AArch64::CPYFMTN:
  case AArch64::CPYFE:
  case AArch64::CPYFEWN:
  case AArch64::CPYFERN:
  case AArch64::CPYFEN:
  case AArch64::CPYFEWT:
  case AArch64::CPYFEWTWN:
  case AArch64::CPYFEWTRN:
  case AArch64::CPYFEWTN:
  case AArch64::CPYFERT:
  case AArch64::CPYFERTWN:
  case AArch64::CPYFERTRN:
  case AArch64::CPYFERTN:
  case AArch64::CPYFET:
  case AArch64::CPYFETWN:
  case AArch64::CPYFETRN:
  case AArch64::CPYFETN:
  case AArch64::CPYP:
  case AArch64::CPYPWN:
  case AArch64::CPYPRN:
  case AArch64::CPYPN:
  case AArch64::CPYPWT:
  case AArch64::CPYPWTWN:
  case AArch64::CPYPWTRN:
  case AArch64::CPYPWTN:
  case AArch64::CPYPRT:
  case AArch64::CPYPRTWN:
  case AArch64::CPYPRTRN:
  case AArch64::CPYPRTN:
  case AArch64::CPYPT:
  case AArch64::CPYPTWN:
  case AArch64::CPYPTRN:
  case AArch64::CPYPTN:
  case AArch64::CPYM:
  case AArch64::CPYMWN:
  case AArch64::CPYMRN:
  case AArch64::CPYMN:
  case AArch64::CPYMWT:
  case AArch64::CPYMWTWN:
  case AArch64::CPYMWTRN:
  case AArch64::CPYMWTN:
  case AArch64::CPYMRT:
  case AArch64::CPYMRTWN:
  case AArch64::CPYMRTRN:
  case AArch64::CPYMRTN:
  case AArch64::CPYMT:
  case AArch64::CPYMTWN:
  case AArch64::CPYMTRN:
  case AArch64::CPYMTN:
  case AArch64::CPYE:
  case AArch64::CPYEWN:
  case AArch64::CPYERN:
  case AArch64::CPYEN:
  case AArch64::CPYEWT:
  case AArch64::CPYEWTWN:
  case AArch64::CPYEWTRN:
  case AArch64::CPYEWTN:
  case AArch64::CPYERT:
  case AArch64::CPYERTWN:
  case AArch64::CPYERTRN:
  case AArch64::CPYERTN:
  case AArch64::CPYET:
  case AArch64::CPYETWN:
  case AArch64::CPYETRN:
  case AArch64::CPYETN: {
    // CPY* tie each writeback register to its input; all three register
    // operands must also be pairwise distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xs_wb = Inst.getOperand(1).getReg();
    unsigned Xn_wb = Inst.getOperand(2).getReg();
    unsigned Xd = Inst.getOperand(3).getReg();
    unsigned Xs = Inst.getOperand(4).getReg();
    unsigned Xn = Inst.getOperand(5).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid CPY instruction, Xd_wb and Xd do not match");
    if (Xs_wb != Xs)
      return Error(Loc[0],
                   "invalid CPY instruction, Xs_wb and Xs do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid CPY instruction, Xn_wb and Xn do not match");
    if (Xd == Xs)
      return Error(Loc[0], "invalid CPY instruction, destination and source"
                           " registers are the same");
    if (Xd == Xn)
      return Error(Loc[0], "invalid CPY instruction, destination and size"
                           " registers are the same");
    if (Xs == Xn)
      return Error(Loc[0], "invalid CPY instruction, source and size"
                           " registers are the same");
    break;
  }
  case AArch64::SETP:
  case AArch64::SETPT:
  case AArch64::SETPN:
  case AArch64::SETPTN:
  case AArch64::SETM:
  case AArch64::SETMT:
  case AArch64::SETMN:
  case AArch64::SETMTN:
  case AArch64::SETE:
  case AArch64::SETET:
  case AArch64::SETEN:
  case AArch64::SETETN:
  case AArch64::SETGP:
  case AArch64::SETGPT:
  case AArch64::SETGPN:
  case AArch64::SETGPTN:
  case AArch64::SETGM:
  case AArch64::SETGMT:
  case AArch64::SETGMN:
  case AArch64::SETGMTN:
  case AArch64::MOPSSETGE:
  case AArch64::MOPSSETGET:
  case AArch64::MOPSSETGEN:
  case AArch64::MOPSSETGETN: {
    // SET* similarly tie writeback registers and require distinct
    // destination/size/source registers.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xn_wb = Inst.getOperand(1).getReg();
    unsigned Xd = Inst.getOperand(2).getReg();
    unsigned Xn = Inst.getOperand(3).getReg();
    unsigned Xm = Inst.getOperand(4).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid SET instruction, Xd_wb and Xd do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid SET instruction, Xn_wb and Xn do not match");
    if (Xd == Xn)
      return Error(Loc[0], "invalid SET instruction, destination and size"
                           " registers are the same");
    if (Xd == Xm)
      return Error(Loc[0], "invalid SET instruction, destination and source"
                           " registers are the same");
    if (Xn == Xm)
      return Error(Loc[0], "invalid SET instruction, source and size"
                           " registers are the same");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
5739
5741 const FeatureBitset &FBS,
5742 unsigned VariantID = 0);
5743
// Translate an error code produced by the auto-generated instruction matcher
// into a human-readable diagnostic reported at Loc.  Every branch forwards to
// Error(), so the returned bool reports that a diagnostic was emitted.
// ErrorInfo is used only by the tied-operand case, where it indexes the
// offending entry in Operands.
// NOTE(review): the parameter-list continuation lines are missing from this
// extraction — confirm the remaining parameters (ErrorInfo/Operands) against
// the full source.
5744 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5747 switch (ErrCode) {
5748 case Match_InvalidTiedOperand: {
 // A tied operand must repeat the destination.  Choose the message from the
 // operand form (register list vs. plain register) and, for registers, from
 // the required equality: same register, or its 32-/64-bit counterpart.
5749 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5750 if (Op.isVectorList())
5751 return Error(Loc, "operand must match destination register list");
5752
5753 assert(Op.isReg() && "Unexpected operand type");
5754 switch (Op.getRegEqualityTy()) {
5755 case RegConstraintEqualityTy::EqualsSubReg:
5756 return Error(Loc, "operand must be 64-bit form of destination register");
5757 case RegConstraintEqualityTy::EqualsSuperReg:
5758 return Error(Loc, "operand must be 32-bit form of destination register");
5759 case RegConstraintEqualityTy::EqualsReg:
5760 return Error(Loc, "operand must match destination register");
5761 }
5762 llvm_unreachable("Unknown RegConstraintEqualityTy");
5763 }
5764 case Match_MissingFeature:
5765 return Error(Loc,
5766 "instruction requires a CPU feature not currently enabled");
5767 case Match_InvalidOperand:
5768 return Error(Loc, "invalid operand for instruction");
5769 case Match_InvalidSuffix:
5770 return Error(Loc, "invalid type suffix for instruction");
5771 case Match_InvalidCondCode:
5772 return Error(Loc, "expected AArch64 condition code");
 // Shifted/extended-register and move-immediate operand diagnostics.
5773 case Match_AddSubRegExtendSmall:
5774 return Error(Loc,
5775 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5776 case Match_AddSubRegExtendLarge:
5777 return Error(Loc,
5778 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5779 case Match_AddSubSecondSource:
5780 return Error(Loc,
5781 "expected compatible register, symbol or integer in range [0, 4095]");
5782 case Match_LogicalSecondSource:
5783 return Error(Loc, "expected compatible register or logical immediate");
5784 case Match_InvalidMovImm32Shift:
5785 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5786 case Match_InvalidMovImm64Shift:
5787 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5788 case Match_AddSubRegShift32:
5789 return Error(Loc,
5790 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5791 case Match_AddSubRegShift64:
5792 return Error(Loc,
5793 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5794 case Match_InvalidFPImm:
5795 return Error(Loc,
5796 "expected compatible register or floating-point constant");
 // Memory-index diagnostics: each message spells out the required alignment
 // multiple and the signed/unsigned range encoded in the error-code name
 // (e.g. "2SImm4" = multiple of 2, 4-bit signed immediate).
5797 case Match_InvalidMemoryIndexedSImm6:
5798 return Error(Loc, "index must be an integer in range [-32, 31].");
5799 case Match_InvalidMemoryIndexedSImm5:
5800 return Error(Loc, "index must be an integer in range [-16, 15].");
5801 case Match_InvalidMemoryIndexed1SImm4:
5802 return Error(Loc, "index must be an integer in range [-8, 7].");
5803 case Match_InvalidMemoryIndexed2SImm4:
5804 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5805 case Match_InvalidMemoryIndexed3SImm4:
5806 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5807 case Match_InvalidMemoryIndexed4SImm4:
5808 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5809 case Match_InvalidMemoryIndexed16SImm4:
5810 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5811 case Match_InvalidMemoryIndexed32SImm4:
5812 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5813 case Match_InvalidMemoryIndexed1SImm6:
5814 return Error(Loc, "index must be an integer in range [-32, 31].");
5815 case Match_InvalidMemoryIndexedSImm8:
5816 return Error(Loc, "index must be an integer in range [-128, 127].");
5817 case Match_InvalidMemoryIndexedSImm9:
5818 return Error(Loc, "index must be an integer in range [-256, 255].");
5819 case Match_InvalidMemoryIndexed16SImm9:
5820 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5821 case Match_InvalidMemoryIndexed8SImm10:
5822 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5823 case Match_InvalidMemoryIndexed4SImm7:
5824 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5825 case Match_InvalidMemoryIndexed8SImm7:
5826 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5827 case Match_InvalidMemoryIndexed16SImm7:
5828 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5829 case Match_InvalidMemoryIndexed8UImm5:
5830 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5831 case Match_InvalidMemoryIndexed8UImm3:
5832 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5833 case Match_InvalidMemoryIndexed4UImm5:
5834 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5835 case Match_InvalidMemoryIndexed2UImm5:
5836 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5837 case Match_InvalidMemoryIndexed8UImm6:
5838 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5839 case Match_InvalidMemoryIndexed16UImm6:
5840 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5841 case Match_InvalidMemoryIndexed4UImm6:
5842 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5843 case Match_InvalidMemoryIndexed2UImm6:
5844 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5845 case Match_InvalidMemoryIndexed1UImm6:
5846 return Error(Loc, "index must be in range [0, 63].");
 // Register-offset extend diagnostics: W-register offsets take uxtw/sxtw,
 // X-register offsets take lsl/sxtx; the shift amount matches the access size.
5847 case Match_InvalidMemoryWExtend8:
5848 return Error(Loc,
5849 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5850 case Match_InvalidMemoryWExtend16:
5851 return Error(Loc,
5852 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5853 case Match_InvalidMemoryWExtend32:
5854 return Error(Loc,
5855 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5856 case Match_InvalidMemoryWExtend64:
5857 return Error(Loc,
5858 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5859 case Match_InvalidMemoryWExtend128:
5860 return Error(Loc,
5861 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5862 case Match_InvalidMemoryXExtend8:
5863 return Error(Loc,
5864 "expected 'lsl' or 'sxtx' with optional shift of #0");
5865 case Match_InvalidMemoryXExtend16:
5866 return Error(Loc,
5867 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5868 case Match_InvalidMemoryXExtend32:
5869 return Error(Loc,
5870 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5871 case Match_InvalidMemoryXExtend64:
5872 return Error(Loc,
5873 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5874 case Match_InvalidMemoryXExtend128:
5875 return Error(Loc,
5876 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5877 case Match_InvalidMemoryIndexed1:
5878 return Error(Loc, "index must be an integer in range [0, 4095].");
5879 case Match_InvalidMemoryIndexed2:
5880 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5881 case Match_InvalidMemoryIndexed4:
5882 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5883 case Match_InvalidMemoryIndexed8:
5884 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5885 case Match_InvalidMemoryIndexed16:
5886 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
 // Plain immediate-range diagnostics.
5887 case Match_InvalidImm0_0:
5888 return Error(Loc, "immediate must be 0.");
5889 case Match_InvalidImm0_1:
5890 return Error(Loc, "immediate must be an integer in range [0, 1].");
5891 case Match_InvalidImm0_3:
5892 return Error(Loc, "immediate must be an integer in range [0, 3].");
5893 case Match_InvalidImm0_7:
5894 return Error(Loc, "immediate must be an integer in range [0, 7].");
5895 case Match_InvalidImm0_15:
5896 return Error(Loc, "immediate must be an integer in range [0, 15].");
5897 case Match_InvalidImm0_31:
5898 return Error(Loc, "immediate must be an integer in range [0, 31].");
5899 case Match_InvalidImm0_63:
5900 return Error(Loc, "immediate must be an integer in range [0, 63].");
5901 case Match_InvalidImm0_127:
5902 return Error(Loc, "immediate must be an integer in range [0, 127].");
5903 case Match_InvalidImm0_255:
5904 return Error(Loc, "immediate must be an integer in range [0, 255].");
5905 case Match_InvalidImm0_65535:
5906 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5907 case Match_InvalidImm1_8:
5908 return Error(Loc, "immediate must be an integer in range [1, 8].");
5909 case Match_InvalidImm1_16:
5910 return Error(Loc, "immediate must be an integer in range [1, 16].");
5911 case Match_InvalidImm1_32:
5912 return Error(Loc, "immediate must be an integer in range [1, 32].");
5913 case Match_InvalidImm1_64:
5914 return Error(Loc, "immediate must be an integer in range [1, 64].");
 // Vector-select immediate-range (<immf>:<imml>) diagnostics.
5915 case Match_InvalidMemoryIndexedRange2UImm0:
5916 return Error(Loc, "vector select offset must be the immediate range 0:1.");
5917 case Match_InvalidMemoryIndexedRange2UImm1:
5918 return Error(Loc, "vector select offset must be an immediate range of the "
5919 "form <immf>:<imml>, where the first "
5920 "immediate is a multiple of 2 in the range [0, 2], and "
5921 "the second immediate is immf + 1.");
5922 case Match_InvalidMemoryIndexedRange2UImm2:
5923 case Match_InvalidMemoryIndexedRange2UImm3:
5924 return Error(
5925 Loc,
5926 "vector select offset must be an immediate range of the form "
5927 "<immf>:<imml>, "
5928 "where the first immediate is a multiple of 2 in the range [0, 6] or "
5929 "[0, 14] "
5930 "depending on the instruction, and the second immediate is immf + 1.");
5931 case Match_InvalidMemoryIndexedRange4UImm0:
5932 return Error(Loc, "vector select offset must be the immediate range 0:3.");
5933 case Match_InvalidMemoryIndexedRange4UImm1:
5934 case Match_InvalidMemoryIndexedRange4UImm2:
5935 return Error(
5936 Loc,
5937 "vector select offset must be an immediate range of the form "
5938 "<immf>:<imml>, "
5939 "where the first immediate is a multiple of 4 in the range [0, 4] or "
5940 "[0, 12] "
5941 "depending on the instruction, and the second immediate is immf + 3.");
 // SVE add/sub and cpy immediates: wider element sizes also accept a
 // 256-multiple form (i.e. the value with an 8-bit left shift).
5942 case Match_InvalidSVEAddSubImm8:
5943 return Error(Loc, "immediate must be an integer in range [0, 255]"
5944 " with a shift amount of 0");
5945 case Match_InvalidSVEAddSubImm16:
5946 case Match_InvalidSVEAddSubImm32:
5947 case Match_InvalidSVEAddSubImm64:
5948 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5949 "multiple of 256 in range [256, 65280]");
5950 case Match_InvalidSVECpyImm8:
5951 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5952 " with a shift amount of 0");
5953 case Match_InvalidSVECpyImm16:
5954 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5955 "multiple of 256 in range [-32768, 65280]");
5956 case Match_InvalidSVECpyImm32:
5957 case Match_InvalidSVECpyImm64:
5958 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5959 "multiple of 256 in range [-32768, 32512]");
 // Vector-lane index diagnostics.
5960 case Match_InvalidIndexRange0_0:
5961 return Error(Loc, "expected lane specifier '[0]'");
5962 case Match_InvalidIndexRange1_1:
5963 return Error(Loc, "expected lane specifier '[1]'");
5964 case Match_InvalidIndexRange0_15:
5965 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5966 case Match_InvalidIndexRange0_7:
5967 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5968 case Match_InvalidIndexRange0_3:
5969 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5970 case Match_InvalidIndexRange0_1:
5971 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5972 case Match_InvalidSVEIndexRange0_63:
5973 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5974 case Match_InvalidSVEIndexRange0_31:
5975 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5976 case Match_InvalidSVEIndexRange0_15:
5977 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5978 case Match_InvalidSVEIndexRange0_7:
5979 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5980 case Match_InvalidSVEIndexRange0_3:
5981 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5982 case Match_InvalidLabel:
5983 return Error(Loc, "expected label or encodable integer pc offset");
5984 case Match_MRS:
5985 return Error(Loc, "expected readable system register");
5986 case Match_MSR:
5987 case Match_InvalidSVCR:
5988 return Error(Loc, "expected writable system register or pstate");
5989 case Match_InvalidComplexRotationEven:
5990 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5991 case Match_InvalidComplexRotationOdd:
5992 return Error(Loc, "complex rotation must be 90 or 270.");
5993 case Match_MnemonicFail: {
 // Unknown mnemonic: run the spell checker over the feature-filtered
 // mnemonic table and append a "did you mean" suggestion if one is found.
5994 std::string Suggestion = AArch64MnemonicSpellCheck(
5995 ((AArch64Operand &)*Operands[0]).getToken(),
5996 ComputeAvailableFeatures(STI->getFeatureBits()));
5997 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5998 }
 // GPR64 with a required scaling shift (e.g. MTE/SVE addressing forms).
5999 case Match_InvalidGPR64shifted8:
6000 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6001 case Match_InvalidGPR64shifted16:
6002 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6003 case Match_InvalidGPR64shifted32:
6004 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6005 case Match_InvalidGPR64shifted64:
6006 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6007 case Match_InvalidGPR64shifted128:
6008 return Error(
6009 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6010 case Match_InvalidGPR64NoXZRshifted8:
6011 return Error(Loc, "register must be x0..x30 without shift");
6012 case Match_InvalidGPR64NoXZRshifted16:
6013 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6014 case Match_InvalidGPR64NoXZRshifted32:
6015 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6016 case Match_InvalidGPR64NoXZRshifted64:
6017 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6018 case Match_InvalidGPR64NoXZRshifted128:
6019 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
 // SVE vector-plus-scalar addressing: ZPR offset with a required
 // extend/shift matching the element size.
6020 case Match_InvalidZPR32UXTW8:
6021 case Match_InvalidZPR32SXTW8:
6022 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6023 case Match_InvalidZPR32UXTW16:
6024 case Match_InvalidZPR32SXTW16:
6025 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6026 case Match_InvalidZPR32UXTW32:
6027 case Match_InvalidZPR32SXTW32:
6028 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6029 case Match_InvalidZPR32UXTW64:
6030 case Match_InvalidZPR32SXTW64:
6031 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6032 case Match_InvalidZPR64UXTW8:
6033 case Match_InvalidZPR64SXTW8:
6034 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6035 case Match_InvalidZPR64UXTW16:
6036 case Match_InvalidZPR64SXTW16:
6037 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6038 case Match_InvalidZPR64UXTW32:
6039 case Match_InvalidZPR64SXTW32:
6040 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6041 case Match_InvalidZPR64UXTW64:
6042 case Match_InvalidZPR64SXTW64:
6043 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6044 case Match_InvalidZPR32LSL8:
6045 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6046 case Match_InvalidZPR32LSL16:
6047 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6048 case Match_InvalidZPR32LSL32:
6049 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6050 case Match_InvalidZPR32LSL64:
6051 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6052 case Match_InvalidZPR64LSL8:
6053 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6054 case Match_InvalidZPR64LSL16:
6055 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6056 case Match_InvalidZPR64LSL32:
6057 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6058 case Match_InvalidZPR64LSL64:
6059 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
 // SVE vector register (ZPR) element-width and restricted-range diagnostics.
6060 case Match_InvalidZPR0:
6061 return Error(Loc, "expected register without element width suffix");
6062 case Match_InvalidZPR8:
6063 case Match_InvalidZPR16:
6064 case Match_InvalidZPR32:
6065 case Match_InvalidZPR64:
6066 case Match_InvalidZPR128:
6067 return Error(Loc, "invalid element width")
;
6068 case Match_InvalidZPR_3b8:
6069 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6070 case Match_InvalidZPR_3b16:
6071 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6072 case Match_InvalidZPR_3b32:
6073 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6074 case Match_InvalidZPR_4b8:
6075 return Error(Loc,
6076 "Invalid restricted vector register, expected z0.b..z15.b");
6077 case Match_InvalidZPR_4b16:
6078 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6079 case Match_InvalidZPR_4b32:
6080 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6081 case Match_InvalidZPR_4b64:
6082 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
 // SVE predicate and predicate-as-counter (PN) register diagnostics.
6083 case Match_InvalidSVEPattern:
6084 return Error(Loc, "invalid predicate pattern");
6085 case Match_InvalidSVEPPRorPNRAnyReg:
6086 case Match_InvalidSVEPPRorPNRBReg:
6087 case Match_InvalidSVEPredicateAnyReg:
6088 case Match_InvalidSVEPredicateBReg:
6089 case Match_InvalidSVEPredicateHReg:
6090 case Match_InvalidSVEPredicateSReg:
6091 case Match_InvalidSVEPredicateDReg:
6092 return Error(Loc, "invalid predicate register.");
6093 case Match_InvalidSVEPredicate3bAnyReg:
6094 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6095 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6096 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6097 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6098 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6099 return Error(Loc, "Invalid predicate register, expected PN in range "
6100 "pn8..pn15 with element suffix.");
6101 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6102 return Error(Loc, "invalid restricted predicate-as-counter register "
6103 "expected pn8..pn15");
6104 case Match_InvalidSVEPNPredicateBReg:
6105 case Match_InvalidSVEPNPredicateHReg:
6106 case Match_InvalidSVEPNPredicateSReg:
6107 case Match_InvalidSVEPNPredicateDReg:
6108 return Error(Loc, "Invalid predicate register, expected PN in range "
6109 "pn0..pn15 with element suffix.")
;
6110 case Match_InvalidSVEVecLenSpecifier:
6111 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6112 case Match_InvalidSVEPredicateListMul2x8:
6113 case Match_InvalidSVEPredicateListMul2x16:
6114 case Match_InvalidSVEPredicateListMul2x32:
6115 case Match_InvalidSVEPredicateListMul2x64:
6116 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6117 "predicate registers, where the first vector is a multiple of 2 "
6118 "and with correct element type");
6119 case Match_InvalidSVEExactFPImmOperandHalfOne:
6120 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6121 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6122 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6123 case Match_InvalidSVEExactFPImmOperandZeroOne:
6124 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
 // SME matrix (ZA) tile, tile-vector, and index-register diagnostics.
6125 case Match_InvalidMatrixTileVectorH8:
6126 case Match_InvalidMatrixTileVectorV8:
6127 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6128 case Match_InvalidMatrixTileVectorH16:
6129 case Match_InvalidMatrixTileVectorV16:
6130 return Error(Loc,
6131 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6132 case Match_InvalidMatrixTileVectorH32:
6133 case Match_InvalidMatrixTileVectorV32:
6134 return Error(Loc,
6135 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6136 case Match_InvalidMatrixTileVectorH64:
6137 case Match_InvalidMatrixTileVectorV64:
6138 return Error(Loc,
6139 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6140 case Match_InvalidMatrixTileVectorH128:
6141 case Match_InvalidMatrixTileVectorV128:
6142 return Error(Loc,
6143 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6144 case Match_InvalidMatrixTile32:
6145 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6146 case Match_InvalidMatrixTile64:
6147 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6148 case Match_InvalidMatrix:
6149 return Error(Loc, "invalid matrix operand, expected za");
6150 case Match_InvalidMatrix8:
6151 return Error(Loc, "invalid matrix operand, expected suffix .b");
6152 case Match_InvalidMatrix16:
6153 return Error(Loc, "invalid matrix operand, expected suffix .h");
6154 case Match_InvalidMatrix32:
6155 return Error(Loc, "invalid matrix operand, expected suffix .s");
6156 case Match_InvalidMatrix64:
6157 return Error(Loc, "invalid matrix operand, expected suffix .d");
6158 case Match_InvalidMatrixIndexGPR32_12_15:
6159 return Error(Loc, "operand must be a register in range [w12, w15]");
6160 case Match_InvalidMatrixIndexGPR32_8_11:
6161 return Error(Loc, "operand must be a register in range [w8, w11]");
 // Multi-vector (consecutive and strided) SVE register-list diagnostics.
6162 case Match_InvalidSVEVectorListMul2x8:
6163 case Match_InvalidSVEVectorListMul2x16:
6164 case Match_InvalidSVEVectorListMul2x32:
6165 case Match_InvalidSVEVectorListMul2x64:
6166 case Match_InvalidSVEVectorListMul2x128:
6167 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6168 "SVE vectors, where the first vector is a multiple of 2 "
6169 "and with matching element types");
6170 case Match_InvalidSVEVectorListMul4x8:
6171 case Match_InvalidSVEVectorListMul4x16:
6172 case Match_InvalidSVEVectorListMul4x32:
6173 case Match_InvalidSVEVectorListMul4x64:
6174 case Match_InvalidSVEVectorListMul4x128:
6175 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6176 "SVE vectors, where the first vector is a multiple of 4 "
6177 "and with matching element types");
6178 case Match_InvalidLookupTable:
6179 return Error(Loc, "Invalid lookup table, expected zt0");
6180 case Match_InvalidSVEVectorListStrided2x8:
6181 case Match_InvalidSVEVectorListStrided2x16:
6182 case Match_InvalidSVEVectorListStrided2x32:
6183 case Match_InvalidSVEVectorListStrided2x64:
6184 return Error(
6185 Loc,
6186 "Invalid vector list, expected list with each SVE vector in the list "
6187 "8 registers apart, and the first register in the range [z0, z7] or "
6188 "[z16, z23] and with correct element type");
6189 case Match_InvalidSVEVectorListStrided4x8:
6190 case Match_InvalidSVEVectorListStrided4x16:
6191 case Match_InvalidSVEVectorListStrided4x32:
6192 case Match_InvalidSVEVectorListStrided4x64:
6193 return Error(
6194 Loc,
6195 "Invalid vector list, expected list with each SVE vector in the list "
6196 "4 registers apart, and the first register in the range [z0, z3] or "
6197 "[z16, z19] and with correct element type");
6198 case Match_AddSubLSLImm3ShiftLarge:
6199 return Error(Loc,
6200 "expected 'lsl' with optional integer in range [0, 7]");
 // Any unhandled code indicates a matcher/table mismatch — a bug, not a
 // user error.
6201 default:
6202 llvm_unreachable("unexpected error code!");
6203 }
6204}
6205
6206static const char *getSubtargetFeatureName(uint64_t Val);
6207
6208bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6210 MCStreamer &Out,
6212 bool MatchingInlineAsm) {
6213 assert(!Operands.empty() && "Unexpect empty operand list!");
6214 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6215 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6216
6217 StringRef Tok = Op.getToken();
6218 unsigned NumOperands = Operands.size();
6219
6220 if (NumOperands == 4 && Tok == "lsl") {
6221 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6222 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6223 if (Op2.isScalarReg() && Op3.isImm()) {
6224 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6225 if (Op3CE) {
6226 uint64_t Op3Val = Op3CE->getValue();
6227 uint64_t NewOp3Val = 0;
6228 uint64_t NewOp4Val = 0;
6229 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6230 Op2.getReg())) {
6231 NewOp3Val = (32 - Op3Val) & 0x1f;
6232 NewOp4Val = 31 - Op3Val;
6233 } else {
6234 NewOp3Val = (64 - Op3Val) & 0x3f;
6235 NewOp4Val = 63 - Op3Val;
6236 }
6237
6238 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6239 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6240
6241 Operands[0] =
6242 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6243 Operands.push_back(AArch64Operand::CreateImm(
6244 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6245 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6246 Op3.getEndLoc(), getContext());
6247 }
6248 }
6249 } else if (NumOperands == 4 && Tok == "bfc") {
6250 // FIXME: Horrible hack to handle BFC->BFM alias.
6251 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6252 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6253 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6254
6255 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6256 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6257 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6258
6259 if (LSBCE && WidthCE) {
6260 uint64_t LSB = LSBCE->getValue();
6261 uint64_t Width = WidthCE->getValue();
6262
6263 uint64_t RegWidth = 0;
6264 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6265 Op1.getReg()))
6266 RegWidth = 64;
6267 else
6268 RegWidth = 32;
6269
6270 if (LSB >= RegWidth)
6271 return Error(LSBOp.getStartLoc(),
6272 "expected integer in range [0, 31]");
6273 if (Width < 1 || Width > RegWidth)
6274 return Error(WidthOp.getStartLoc(),
6275 "expected integer in range [1, 32]");
6276
6277 uint64_t ImmR = 0;
6278 if (RegWidth == 32)
6279 ImmR = (32 - LSB) & 0x1f;
6280 else
6281 ImmR = (64 - LSB) & 0x3f;
6282
6283 uint64_t ImmS = Width - 1;
6284
6285 if (ImmR != 0 && ImmS >= ImmR)
6286 return Error(WidthOp.getStartLoc(),
6287 "requested insert overflows register");
6288
6289 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6290 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6291 Operands[0] =
6292 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6293 Operands[2] = AArch64Operand::CreateReg(
6294 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6295 SMLoc(), SMLoc(), getContext());
6296 Operands[3] = AArch64Operand::CreateImm(
6297 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6298 Operands.emplace_back(
6299 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6300 WidthOp.getEndLoc(), getContext()));
6301 }
6302 }
6303 } else if (NumOperands == 5) {
6304 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6305 // UBFIZ -> UBFM aliases.
6306 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6307 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6308 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6309 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6310
6311 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6312 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6313 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6314
6315 if (Op3CE && Op4CE) {
6316 uint64_t Op3Val = Op3CE->getValue();
6317 uint64_t Op4Val = Op4CE->getValue();
6318
6319 uint64_t RegWidth = 0;
6320 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6321 Op1.getReg()))
6322 RegWidth = 64;
6323 else
6324 RegWidth = 32;
6325
6326 if (Op3Val >= RegWidth)
6327 return Error(Op3.getStartLoc(),
6328 "expected integer in range [0, 31]");
6329 if (Op4Val < 1 || Op4Val > RegWidth)
6330 return Error(Op4.getStartLoc(),
6331 "expected integer in range [1, 32]");
6332
6333 uint64_t NewOp3Val = 0;
6334 if (RegWidth == 32)
6335 NewOp3Val = (32 - Op3Val) & 0x1f;
6336 else
6337 NewOp3Val = (64 - Op3Val) & 0x3f;
6338
6339 uint64_t NewOp4Val = Op4Val - 1;
6340
6341 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6342 return Error(Op4.getStartLoc(),
6343 "requested insert overflows register");
6344
6345 const MCExpr *NewOp3 =
6346 MCConstantExpr::create(NewOp3Val, getContext());
6347 const MCExpr *NewOp4 =
6348 MCConstantExpr::create(NewOp4Val, getContext());
6349 Operands[3] = AArch64Operand::CreateImm(
6350 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6351 Operands[4] = AArch64Operand::CreateImm(
6352 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6353 if (Tok == "bfi")
6354 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6355 getContext());
6356 else if (Tok == "sbfiz")
6357 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6358 getContext());
6359 else if (Tok == "ubfiz")
6360 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6361 getContext());
6362 else
6363 llvm_unreachable("No valid mnemonic for alias?");
6364 }
6365 }
6366
6367 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6368 // UBFX -> UBFM aliases.
6369 } else if (NumOperands == 5 &&
6370 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6371 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6372 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6373 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6374
6375 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6376 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6377 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6378
6379 if (Op3CE && Op4CE) {
6380 uint64_t Op3Val = Op3CE->getValue();
6381 uint64_t Op4Val = Op4CE->getValue();
6382
6383 uint64_t RegWidth = 0;
6384 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6385 Op1.getReg()))
6386 RegWidth = 64;
6387 else
6388 RegWidth = 32;
6389
6390 if (Op3Val >= RegWidth)
6391 return Error(Op3.getStartLoc(),
6392 "expected integer in range [0, 31]");
6393 if (Op4Val < 1 || Op4Val > RegWidth)
6394 return Error(Op4.getStartLoc(),
6395 "expected integer in range [1, 32]");
6396
6397 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6398
6399 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6400 return Error(Op4.getStartLoc(),
6401 "requested extract overflows register");
6402
6403 const MCExpr *NewOp4 =
6404 MCConstantExpr::create(NewOp4Val, getContext());
6405 Operands[4] = AArch64Operand::CreateImm(
6406 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6407 if (Tok == "bfxil")
6408 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6409 getContext());
6410 else if (Tok == "sbfx")
6411 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6412 getContext());
6413 else if (Tok == "ubfx")
6414 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6415 getContext());
6416 else
6417 llvm_unreachable("No valid mnemonic for alias?");
6418 }
6419 }
6420 }
6421 }
6422
6423 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6424 // instruction for FP registers correctly in some rare circumstances. Convert
6425 // it to a safe instruction and warn (because silently changing someone's
6426 // assembly is rude).
6427 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6428 NumOperands == 4 && Tok == "movi") {
6429 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6430 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6431 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6432 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6433 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6434 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6435 if (Suffix.lower() == ".2d" &&
6436 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6437 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6438 " correctly on this CPU, converting to equivalent movi.16b");
6439 // Switch the suffix to .16b.
6440 unsigned Idx = Op1.isToken() ? 1 : 2;
6441 Operands[Idx] =
6442 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6443 }
6444 }
6445 }
6446
6447 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6448 // InstAlias can't quite handle this since the reg classes aren't
6449 // subclasses.
6450 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6451 // The source register can be Wn here, but the matcher expects a
6452 // GPR64. Twiddle it here if necessary.
6453 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6454 if (Op.isScalarReg()) {
6455 unsigned Reg = getXRegFromWReg(Op.getReg());
6456 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6457 Op.getStartLoc(), Op.getEndLoc(),
6458 getContext());
6459 }
6460 }
6461 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6462 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6463 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6464 if (Op.isScalarReg() &&
6465 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6466 Op.getReg())) {
6467 // The source register can be Wn here, but the matcher expects a
6468 // GPR64. Twiddle it here if necessary.
6469 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6470 if (Op.isScalarReg()) {
6471 unsigned Reg = getXRegFromWReg(Op.getReg());
6472 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6473 Op.getStartLoc(),
6474 Op.getEndLoc(), getContext());
6475 }
6476 }
6477 }
6478 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6479 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6480 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6481 if (Op.isScalarReg() &&
6482 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6483 Op.getReg())) {
6484 // The source register can be Wn here, but the matcher expects a
6485 // GPR32. Twiddle it here if necessary.
6486 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6487 if (Op.isScalarReg()) {
6488 unsigned Reg = getWRegFromXReg(Op.getReg());
6489 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6490 Op.getStartLoc(),
6491 Op.getEndLoc(), getContext());
6492 }
6493 }
6494 }
6495
6496 MCInst Inst;
6497 FeatureBitset MissingFeatures;
6498 // First try to match against the secondary set of tables containing the
6499 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6500 unsigned MatchResult =
6501 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6502 MatchingInlineAsm, 1);
6503
6504 // If that fails, try against the alternate table containing long-form NEON:
6505 // "fadd v0.2s, v1.2s, v2.2s"
6506 if (MatchResult != Match_Success) {
6507 // But first, save the short-form match result: we can use it in case the
6508 // long-form match also fails.
6509 auto ShortFormNEONErrorInfo = ErrorInfo;
6510 auto ShortFormNEONMatchResult = MatchResult;
6511 auto ShortFormNEONMissingFeatures = MissingFeatures;
6512
6513 MatchResult =
6514 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6515 MatchingInlineAsm, 0);
6516
6517 // Now, both matches failed, and the long-form match failed on the mnemonic
6518 // suffix token operand. The short-form match failure is probably more
6519 // relevant: use it instead.
6520 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6521 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6522 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6523 MatchResult = ShortFormNEONMatchResult;
6524 ErrorInfo = ShortFormNEONErrorInfo;
6525 MissingFeatures = ShortFormNEONMissingFeatures;
6526 }
6527 }
6528
6529 switch (MatchResult) {
6530 case Match_Success: {
6531 // Perform range checking and other semantic validations
6532 SmallVector<SMLoc, 8> OperandLocs;
6533 NumOperands = Operands.size();
6534 for (unsigned i = 1; i < NumOperands; ++i)
6535 OperandLocs.push_back(Operands[i]->getStartLoc());
6536 if (validateInstruction(Inst, IDLoc, OperandLocs))
6537 return true;
6538
6539 Inst.setLoc(IDLoc);
6540 Out.emitInstruction(Inst, getSTI());
6541 return false;
6542 }
6543 case Match_MissingFeature: {
6544 assert(MissingFeatures.any() && "Unknown missing feature!");
6545 // Special case the error message for the very common case where only
6546 // a single subtarget feature is missing (neon, e.g.).
6547 std::string Msg = "instruction requires:";
6548 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6549 if (MissingFeatures[i]) {
6550 Msg += " ";
6551 Msg += getSubtargetFeatureName(i);
6552 }
6553 }
6554 return Error(IDLoc, Msg);
6555 }
6556 case Match_MnemonicFail:
6557 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6558 case Match_InvalidOperand: {
6559 SMLoc ErrorLoc = IDLoc;
6560
6561 if (ErrorInfo != ~0ULL) {
6562 if (ErrorInfo >= Operands.size())
6563 return Error(IDLoc, "too few operands for instruction",
6564 SMRange(IDLoc, getTok().getLoc()));
6565
6566 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6567 if (ErrorLoc == SMLoc())
6568 ErrorLoc = IDLoc;
6569 }
6570 // If the match failed on a suffix token operand, tweak the diagnostic
6571 // accordingly.
6572 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6573 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6574 MatchResult = Match_InvalidSuffix;
6575
6576 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6577 }
6578 case Match_InvalidTiedOperand:
6579 case Match_InvalidMemoryIndexed1:
6580 case Match_InvalidMemoryIndexed2:
6581 case Match_InvalidMemoryIndexed4:
6582 case Match_InvalidMemoryIndexed8:
6583 case Match_InvalidMemoryIndexed16:
6584 case Match_InvalidCondCode:
6585 case Match_AddSubLSLImm3ShiftLarge:
6586 case Match_AddSubRegExtendSmall:
6587 case Match_AddSubRegExtendLarge:
6588 case Match_AddSubSecondSource:
6589 case Match_LogicalSecondSource:
6590 case Match_AddSubRegShift32:
6591 case Match_AddSubRegShift64:
6592 case Match_InvalidMovImm32Shift:
6593 case Match_InvalidMovImm64Shift:
6594 case Match_InvalidFPImm:
6595 case Match_InvalidMemoryWExtend8:
6596 case Match_InvalidMemoryWExtend16:
6597 case Match_InvalidMemoryWExtend32:
6598 case Match_InvalidMemoryWExtend64:
6599 case Match_InvalidMemoryWExtend128:
6600 case Match_InvalidMemoryXExtend8:
6601 case Match_InvalidMemoryXExtend16:
6602 case Match_InvalidMemoryXExtend32:
6603 case Match_InvalidMemoryXExtend64:
6604 case Match_InvalidMemoryXExtend128:
6605 case Match_InvalidMemoryIndexed1SImm4:
6606 case Match_InvalidMemoryIndexed2SImm4:
6607 case Match_InvalidMemoryIndexed3SImm4:
6608 case Match_InvalidMemoryIndexed4SImm4:
6609 case Match_InvalidMemoryIndexed1SImm6:
6610 case Match_InvalidMemoryIndexed16SImm4:
6611 case Match_InvalidMemoryIndexed32SImm4:
6612 case Match_InvalidMemoryIndexed4SImm7:
6613 case Match_InvalidMemoryIndexed8SImm7:
6614 case Match_InvalidMemoryIndexed16SImm7:
6615 case Match_InvalidMemoryIndexed8UImm5:
6616 case Match_InvalidMemoryIndexed8UImm3:
6617 case Match_InvalidMemoryIndexed4UImm5:
6618 case Match_InvalidMemoryIndexed2UImm5:
6619 case Match_InvalidMemoryIndexed1UImm6:
6620 case Match_InvalidMemoryIndexed2UImm6:
6621 case Match_InvalidMemoryIndexed4UImm6:
6622 case Match_InvalidMemoryIndexed8UImm6:
6623 case Match_InvalidMemoryIndexed16UImm6:
6624 case Match_InvalidMemoryIndexedSImm6:
6625 case Match_InvalidMemoryIndexedSImm5:
6626 case Match_InvalidMemoryIndexedSImm8:
6627 case Match_InvalidMemoryIndexedSImm9:
6628 case Match_InvalidMemoryIndexed16SImm9:
6629 case Match_InvalidMemoryIndexed8SImm10:
6630 case Match_InvalidImm0_0:
6631 case Match_InvalidImm0_1:
6632 case Match_InvalidImm0_3:
6633 case Match_InvalidImm0_7:
6634 case Match_InvalidImm0_15:
6635 case Match_InvalidImm0_31:
6636 case Match_InvalidImm0_63:
6637 case Match_InvalidImm0_127:
6638 case Match_InvalidImm0_255:
6639 case Match_InvalidImm0_65535:
6640 case Match_InvalidImm1_8:
6641 case Match_InvalidImm1_16:
6642 case Match_InvalidImm1_32:
6643 case Match_InvalidImm1_64:
6644 case Match_InvalidMemoryIndexedRange2UImm0:
6645 case Match_InvalidMemoryIndexedRange2UImm1:
6646 case Match_InvalidMemoryIndexedRange2UImm2:
6647 case Match_InvalidMemoryIndexedRange2UImm3:
6648 case Match_InvalidMemoryIndexedRange4UImm0:
6649 case Match_InvalidMemoryIndexedRange4UImm1:
6650 case Match_InvalidMemoryIndexedRange4UImm2:
6651 case Match_InvalidSVEAddSubImm8:
6652 case Match_InvalidSVEAddSubImm16:
6653 case Match_InvalidSVEAddSubImm32:
6654 case Match_InvalidSVEAddSubImm64:
6655 case Match_InvalidSVECpyImm8:
6656 case Match_InvalidSVECpyImm16:
6657 case Match_InvalidSVECpyImm32:
6658 case Match_InvalidSVECpyImm64:
6659 case Match_InvalidIndexRange0_0:
6660 case Match_InvalidIndexRange1_1:
6661 case Match_InvalidIndexRange0_15:
6662 case Match_InvalidIndexRange0_7:
6663 case Match_InvalidIndexRange0_3:
6664 case Match_InvalidIndexRange0_1:
6665 case Match_InvalidSVEIndexRange0_63:
6666 case Match_InvalidSVEIndexRange0_31:
6667 case Match_InvalidSVEIndexRange0_15:
6668 case Match_InvalidSVEIndexRange0_7:
6669 case Match_InvalidSVEIndexRange0_3:
6670 case Match_InvalidLabel:
6671 case Match_InvalidComplexRotationEven:
6672 case Match_InvalidComplexRotationOdd:
6673 case Match_InvalidGPR64shifted8:
6674 case Match_InvalidGPR64shifted16:
6675 case Match_InvalidGPR64shifted32:
6676 case Match_InvalidGPR64shifted64:
6677 case Match_InvalidGPR64shifted128:
6678 case Match_InvalidGPR64NoXZRshifted8:
6679 case Match_InvalidGPR64NoXZRshifted16:
6680 case Match_InvalidGPR64NoXZRshifted32:
6681 case Match_InvalidGPR64NoXZRshifted64:
6682 case Match_InvalidGPR64NoXZRshifted128:
6683 case Match_InvalidZPR32UXTW8:
6684 case Match_InvalidZPR32UXTW16:
6685 case Match_InvalidZPR32UXTW32:
6686 case Match_InvalidZPR32UXTW64:
6687 case Match_InvalidZPR32SXTW8:
6688 case Match_InvalidZPR32SXTW16:
6689 case Match_InvalidZPR32SXTW32:
6690 case Match_InvalidZPR32SXTW64:
6691 case Match_InvalidZPR64UXTW8:
6692 case Match_InvalidZPR64SXTW8:
6693 case Match_InvalidZPR64UXTW16:
6694 case Match_InvalidZPR64SXTW16:
6695 case Match_InvalidZPR64UXTW32:
6696 case Match_InvalidZPR64SXTW32:
6697 case Match_InvalidZPR64UXTW64:
6698 case Match_InvalidZPR64SXTW64:
6699 case Match_InvalidZPR32LSL8:
6700 case Match_InvalidZPR32LSL16:
6701 case Match_InvalidZPR32LSL32:
6702 case Match_InvalidZPR32LSL64:
6703 case Match_InvalidZPR64LSL8:
6704 case Match_InvalidZPR64LSL16:
6705 case Match_InvalidZPR64LSL32:
6706 case Match_InvalidZPR64LSL64:
6707 case Match_InvalidZPR0:
6708 case Match_InvalidZPR8:
6709 case Match_InvalidZPR16:
6710 case Match_InvalidZPR32:
6711 case Match_InvalidZPR64:
6712 case Match_InvalidZPR128:
6713 case Match_InvalidZPR_3b8:
6714 case Match_InvalidZPR_3b16:
6715 case Match_InvalidZPR_3b32:
6716 case Match_InvalidZPR_4b8:
6717 case Match_InvalidZPR_4b16:
6718 case Match_InvalidZPR_4b32:
6719 case Match_InvalidZPR_4b64:
6720 case Match_InvalidSVEPPRorPNRAnyReg:
6721 case Match_InvalidSVEPPRorPNRBReg:
6722 case Match_InvalidSVEPredicateAnyReg:
6723 case Match_InvalidSVEPattern:
6724 case Match_InvalidSVEVecLenSpecifier:
6725 case Match_InvalidSVEPredicateBReg:
6726 case Match_InvalidSVEPredicateHReg:
6727 case Match_InvalidSVEPredicateSReg:
6728 case Match_InvalidSVEPredicateDReg:
6729 case Match_InvalidSVEPredicate3bAnyReg:
6730 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6731 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6732 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6733 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6734 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6735 case Match_InvalidSVEPNPredicateBReg:
6736 case Match_InvalidSVEPNPredicateHReg:
6737 case Match_InvalidSVEPNPredicateSReg:
6738 case Match_InvalidSVEPNPredicateDReg:
6739 case Match_InvalidSVEPredicateListMul2x8:
6740 case Match_InvalidSVEPredicateListMul2x16:
6741 case Match_InvalidSVEPredicateListMul2x32:
6742 case Match_InvalidSVEPredicateListMul2x64:
6743 case Match_InvalidSVEExactFPImmOperandHalfOne:
6744 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6745 case Match_InvalidSVEExactFPImmOperandZeroOne:
6746 case Match_InvalidMatrixTile32:
6747 case Match_InvalidMatrixTile64:
6748 case Match_InvalidMatrix:
6749 case Match_InvalidMatrix8:
6750 case Match_InvalidMatrix16:
6751 case Match_InvalidMatrix32:
6752 case Match_InvalidMatrix64:
6753 case Match_InvalidMatrixTileVectorH8:
6754 case Match_InvalidMatrixTileVectorH16:
6755 case Match_InvalidMatrixTileVectorH32:
6756 case Match_InvalidMatrixTileVectorH64:
6757 case Match_InvalidMatrixTileVectorH128:
6758 case Match_InvalidMatrixTileVectorV8:
6759 case Match_InvalidMatrixTileVectorV16:
6760 case Match_InvalidMatrixTileVectorV32:
6761 case Match_InvalidMatrixTileVectorV64:
6762 case Match_InvalidMatrixTileVectorV128:
6763 case Match_InvalidSVCR:
6764 case Match_InvalidMatrixIndexGPR32_12_15:
6765 case Match_InvalidMatrixIndexGPR32_8_11:
6766 case Match_InvalidLookupTable:
6767 case Match_InvalidSVEVectorListMul2x8:
6768 case Match_InvalidSVEVectorListMul2x16:
6769 case Match_InvalidSVEVectorListMul2x32:
6770 case Match_InvalidSVEVectorListMul2x64:
6771 case Match_InvalidSVEVectorListMul2x128:
6772 case Match_InvalidSVEVectorListMul4x8:
6773 case Match_InvalidSVEVectorListMul4x16:
6774 case Match_InvalidSVEVectorListMul4x32:
6775 case Match_InvalidSVEVectorListMul4x64:
6776 case Match_InvalidSVEVectorListMul4x128:
6777 case Match_InvalidSVEVectorListStrided2x8:
6778 case Match_InvalidSVEVectorListStrided2x16:
6779 case Match_InvalidSVEVectorListStrided2x32:
6780 case Match_InvalidSVEVectorListStrided2x64:
6781 case Match_InvalidSVEVectorListStrided4x8:
6782 case Match_InvalidSVEVectorListStrided4x16:
6783 case Match_InvalidSVEVectorListStrided4x32:
6784 case Match_InvalidSVEVectorListStrided4x64:
6785 case Match_MSR:
6786 case Match_MRS: {
6787 if (ErrorInfo >= Operands.size())
6788 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6789 // Any time we get here, there's nothing fancy to do. Just get the
6790 // operand SMLoc and display the diagnostic.
6791 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6792 if (ErrorLoc == SMLoc())
6793 ErrorLoc = IDLoc;
6794 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6795 }
6796 }
6797
6798 llvm_unreachable("Implement any new match types added!");
6799}
6800
6801/// ParseDirective parses the arm specific directives
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Some directives are only valid for a particular object container, so
  // determine the output format (Mach-O / COFF / other, i.e. ELF) up front.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive names are matched case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Format-independent AArch64 directives.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  // Mach-O only: linker optimization hint (LOH) directives.
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  // COFF only: Windows SEH unwind directives.
  } else if (IsCOFF) {
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The four save_any_reg variants differ only in the (Paired, Writeback)
    // flags passed through to the shared handler.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else
      return true;
  } else
    return true;
  // Returning false means the directive was handled here; true hands it back
  // to the generic directive parser.
  return false;
}
6903
6904static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6905 SmallVector<StringRef, 4> &RequestedExtensions) {
6906 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6907 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6908
6909 if (!NoCrypto && Crypto) {
6910 // Map 'generic' (and others) to sha2 and aes, because
6911 // that was the traditional meaning of crypto.
6912 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6913 ArchInfo == AArch64::ARMV8_3A) {
6914 RequestedExtensions.push_back("sha2");
6915 RequestedExtensions.push_back("aes");
6916 }
6917 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6918 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6919 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6920 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6921 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6922 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6923 RequestedExtensions.push_back("sm4");
6924 RequestedExtensions.push_back("sha3");
6925 RequestedExtensions.push_back("sha2");
6926 RequestedExtensions.push_back("aes");
6927 }
6928 } else if (NoCrypto) {
6929 // Map 'generic' (and others) to sha2 and aes, because
6930 // that was the traditional meaning of crypto.
6931 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6932 ArchInfo == AArch64::ARMV8_3A) {
6933 RequestedExtensions.push_back("nosha2");
6934 RequestedExtensions.push_back("noaes");
6935 }
6936 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6937 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6938 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6939 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6940 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6941 ArchInfo == AArch64::ARMV9_4A) {
6942 RequestedExtensions.push_back("nosm4");
6943 RequestedExtensions.push_back("nosha3");
6944 RequestedExtensions.push_back("nosha2");
6945 RequestedExtensions.push_back("noaes");
6946 }
6947 }
6948}
6949
6950/// parseDirectiveArch
6951/// ::= .arch token
6952bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6953 SMLoc ArchLoc = getLoc();
6954
6955 StringRef Arch, ExtensionString;
6956 std::tie(Arch, ExtensionString) =
6957 getParser().parseStringToEndOfStatement().trim().split('+');
6958
6959 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
6960 if (!ArchInfo)
6961 return Error(ArchLoc, "unknown arch name");
6962
6963 if (parseToken(AsmToken::EndOfStatement))
6964 return true;
6965
6966 // Get the architecture and extension features.
6967 std::vector<StringRef> AArch64Features;
6968 AArch64Features.push_back(ArchInfo->ArchFeature);
6969 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
6970
6971 MCSubtargetInfo &STI = copySTI();
6972 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6973 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6974 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6975
6976 SmallVector<StringRef, 4> RequestedExtensions;
6977 if (!ExtensionString.empty())
6978 ExtensionString.split(RequestedExtensions, '+');
6979
6980 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
6981
6982 FeatureBitset Features = STI.getFeatureBits();
6983 setAvailableFeatures(ComputeAvailableFeatures(Features));
6984 for (auto Name : RequestedExtensions) {
6985 bool EnableFeature = !Name.consume_front_insensitive("no");
6986
6987 for (const auto &Extension : ExtensionMap) {
6988 if (Extension.Name != Name)
6989 continue;
6990
6991 if (Extension.Features.none())
6992 report_fatal_error("unsupported architectural extension: " + Name);
6993
6994 FeatureBitset ToggleFeatures =
6995 EnableFeature
6997 : STI.ToggleFeature(Features & Extension.Features);
6998 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6999 break;
7000 }
7001 }
7002 return false;
7003}
7004
7005/// parseDirectiveArchExtension
7006/// ::= .arch_extension [no]feature
7007bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7008 SMLoc ExtLoc = getLoc();
7009
7010 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7011
7012 if (parseEOL())
7013 return true;
7014
7015 bool EnableFeature = true;
7016 if (Name.starts_with_insensitive("no")) {
7017 EnableFeature = false;
7018 Name = Name.substr(2);
7019 }
7020
7021 MCSubtargetInfo &STI = copySTI();
7022 FeatureBitset Features = STI.getFeatureBits();
7023 for (const auto &Extension : ExtensionMap) {
7024 if (Extension.Name != Name)
7025 continue;
7026
7027 if (Extension.Features.none())
7028 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7029
7030 FeatureBitset ToggleFeatures =
7031 EnableFeature
7033 : STI.ToggleFeature(Features & Extension.Features);
7034 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7035 return false;
7036 }
7037
7038 return Error(ExtLoc, "unknown architectural extension: " + Name);
7039}
7040
7042 return SMLoc::getFromPointer(L.getPointer() + Offset);
7043}
7044
7045/// parseDirectiveCPU
7046/// ::= .cpu id
7047bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7048 SMLoc CurLoc = getLoc();
7049
7050 StringRef CPU, ExtensionString;
7051 std::tie(CPU, ExtensionString) =
7052 getParser().parseStringToEndOfStatement().trim().split('+');
7053
7054 if (parseToken(AsmToken::EndOfStatement))
7055 return true;
7056
7057 SmallVector<StringRef, 4> RequestedExtensions;
7058 if (!ExtensionString.empty())
7059 ExtensionString.split(RequestedExtensions, '+');
7060
7062 if (!CpuArch) {
7063 Error(CurLoc, "unknown CPU name");
7064 return false;
7065 }
7066 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7067
7068 MCSubtargetInfo &STI = copySTI();
7069 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7070 CurLoc = incrementLoc(CurLoc, CPU.size());
7071
7072 for (auto Name : RequestedExtensions) {
7073 // Advance source location past '+'.
7074 CurLoc = incrementLoc(CurLoc, 1);
7075
7076 bool EnableFeature = !Name.consume_front_insensitive("no");
7077
7078 bool FoundExtension = false;
7079 for (const auto &Extension : ExtensionMap) {
7080 if (Extension.Name != Name)
7081 continue;
7082
7083 if (Extension.Features.none())
7084 report_fatal_error("unsupported architectural extension: " + Name);
7085
7086 FeatureBitset Features = STI.getFeatureBits();
7087 FeatureBitset ToggleFeatures =
7088 EnableFeature
7090 : STI.ToggleFeature(Features & Extension.Features);
7091 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7092 FoundExtension = true;
7093
7094 break;
7095 }
7096
7097 if (!FoundExtension)
7098 Error(CurLoc, "unsupported architectural extension");
7099
7100 CurLoc = incrementLoc(CurLoc, Name.size());
7101 }
7102 return false;
7103}
7104
7105/// parseDirectiveInst
7106/// ::= .inst opcode [, ...]
7107bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7108 if (getLexer().is(AsmToken::EndOfStatement))
7109 return Error(Loc, "expected expression following '.inst' directive");
7110
7111 auto parseOp = [&]() -> bool {
7112 SMLoc L = getLoc();
7113 const MCExpr *Expr = nullptr;
7114 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7115 return true;
7116 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7117 if (check(!Value, L, "expected constant expression"))
7118 return true;
7119 getTargetStreamer().emitInst(Value->getValue());
7120 return false;
7121 };
7122
7123 return parseMany(parseOp);
7124}
7125
7126// parseDirectiveTLSDescCall:
7127// ::= .tlsdesccall symbol
7128bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7130 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7131 parseToken(AsmToken::EndOfStatement))
7132 return true;
7133
7134 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7135 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7136 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7137
7138 MCInst Inst;
7139 Inst.setOpcode(AArch64::TLSDESCCALL);
7141
7142 getParser().getStreamer().emitInstruction(Inst, getSTI());
7143 return false;
7144}
7145
7146/// ::= .loh <lohName | lohId> label1, ..., labelN
7147/// The number of arguments depends on the loh identifier.
7148bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7150 if (getTok().isNot(AsmToken::Identifier)) {
7151 if (getTok().isNot(AsmToken::Integer))
7152 return TokError("expected an identifier or a number in directive");
7153 // We successfully get a numeric value for the identifier.
7154 // Check if it is valid.
7155 int64_t Id = getTok().getIntVal();
7156 if (Id <= -1U && !isValidMCLOHType(Id))
7157 return TokError("invalid numeric identifier in directive");
7158 Kind = (MCLOHType)Id;
7159 } else {
7160 StringRef Name = getTok().getIdentifier();
7161 // We successfully parse an identifier.
7162 // Check if it is a recognized one.
7163 int Id = MCLOHNameToId(Name);
7164
7165 if (Id == -1)
7166 return TokError("invalid identifier in directive");
7167 Kind = (MCLOHType)Id;
7168 }
7169 // Consume the identifier.
7170 Lex();
7171 // Get the number of arguments of this LOH.
7172 int NbArgs = MCLOHIdToNbArgs(Kind);
7173
7174 assert(NbArgs != -1 && "Invalid number of arguments");
7175
7177 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7179 if (getParser().parseIdentifier(Name))
7180 return TokError("expected identifier in directive");
7181 Args.push_back(getContext().getOrCreateSymbol(Name));
7182
7183 if (Idx + 1 == NbArgs)
7184 break;
7185 if (parseComma())
7186 return true;
7187 }
7188 if (parseEOL())
7189 return true;
7190
7191 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7192 return false;
7193}
7194
7195/// parseDirectiveLtorg
7196/// ::= .ltorg | .pool
7197bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7198 if (parseEOL())
7199 return true;
7200 getTargetStreamer().emitCurrentConstantPool();
7201 return false;
7202}
7203
7204/// parseDirectiveReq
7205/// ::= name .req registername
7206bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7207 Lex(); // Eat the '.req' token.
7208 SMLoc SRegLoc = getLoc();
7209 RegKind RegisterKind = RegKind::Scalar;
7210 MCRegister RegNum;
7211 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7212
7213 if (!ParseRes.isSuccess()) {
7215 RegisterKind = RegKind::NeonVector;
7216 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7217
7218 if (ParseRes.isFailure())
7219 return true;
7220
7221 if (ParseRes.isSuccess() && !Kind.empty())
7222 return Error(SRegLoc, "vector register without type specifier expected");
7223 }
7224
7225 if (!ParseRes.isSuccess()) {
7227 RegisterKind = RegKind::SVEDataVector;
7228 ParseRes =
7229 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7230
7231 if (ParseRes.isFailure())
7232 return true;
7233
7234 if (ParseRes.isSuccess() && !Kind.empty())
7235 return Error(SRegLoc,
7236 "sve vector register without type specifier expected");
7237 }
7238
7239 if (!ParseRes.isSuccess()) {
7241 RegisterKind = RegKind::SVEPredicateVector;
7242 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7243
7244 if (ParseRes.isFailure())
7245 return true;
7246
7247 if (ParseRes.isSuccess() && !Kind.empty())
7248 return Error(SRegLoc,
7249 "sve predicate register without type specifier expected");
7250 }
7251
7252 if (!ParseRes.isSuccess())
7253 return Error(SRegLoc, "register name or alias expected");
7254
7255 // Shouldn't be anything else.
7256 if (parseEOL())
7257 return true;
7258
7259 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7260 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7261 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7262
7263 return false;
7264}
7265
7266/// parseDirectiveUneq
7267/// ::= .unreq registername
7268bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7269 if (getTok().isNot(AsmToken::Identifier))
7270 return TokError("unexpected input in .unreq directive.");
7271 RegisterReqs.erase(getTok().getIdentifier().lower());
7272 Lex(); // Eat the identifier.
7273 return parseToken(AsmToken::EndOfStatement);
7274}
7275
7276bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7277 if (parseEOL())
7278 return true;
7279 getStreamer().emitCFINegateRAState();
7280 return false;
7281}
7282
7283/// parseDirectiveCFIBKeyFrame
7284/// ::= .cfi_b_key
7285bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7286 if (parseEOL())
7287 return true;
7288 getStreamer().emitCFIBKeyFrame();
7289 return false;
7290}
7291
7292/// parseDirectiveCFIMTETaggedFrame
7293/// ::= .cfi_mte_tagged_frame
7294bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7295 if (parseEOL())
7296 return true;
7297 getStreamer().emitCFIMTETaggedFrame();
7298 return false;
7299}
7300
7301/// parseDirectiveVariantPCS
7302/// ::= .variant_pcs symbolname
7303bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7305 if (getParser().parseIdentifier(Name))
7306 return TokError("expected symbol name");
7307 if (parseEOL())
7308 return true;
7309 getTargetStreamer().emitDirectiveVariantPCS(
7310 getContext().getOrCreateSymbol(Name));
7311 return false;
7312}
7313
7314/// parseDirectiveSEHAllocStack
7315/// ::= .seh_stackalloc
7316bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7317 int64_t Size;
7318 if (parseImmExpr(Size))
7319 return true;
7320 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7321 return false;
7322}
7323
7324/// parseDirectiveSEHPrologEnd
7325/// ::= .seh_endprologue
7326bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7327 getTargetStreamer().emitARM64WinCFIPrologEnd();
7328 return false;
7329}
7330
7331/// parseDirectiveSEHSaveR19R20X
7332/// ::= .seh_save_r19r20_x
7333bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7334 int64_t Offset;
7335 if (parseImmExpr(Offset))
7336 return true;
7337 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7338 return false;
7339}
7340
7341/// parseDirectiveSEHSaveFPLR
7342/// ::= .seh_save_fplr
7343bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7344 int64_t Offset;
7345 if (parseImmExpr(Offset))
7346 return true;
7347 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7348 return false;
7349}
7350
7351/// parseDirectiveSEHSaveFPLRX
7352/// ::= .seh_save_fplr_x
7353bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7354 int64_t Offset;
7355 if (parseImmExpr(Offset))
7356 return true;
7357 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7358 return false;
7359}
7360
7361/// parseDirectiveSEHSaveReg
7362/// ::= .seh_save_reg
7363bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7364 unsigned Reg;
7365 int64_t Offset;
7366 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7367 parseComma() || parseImmExpr(Offset))
7368 return true;
7369 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7370 return false;
7371}
7372
7373/// parseDirectiveSEHSaveRegX
7374/// ::= .seh_save_reg_x
7375bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7376 unsigned Reg;
7377 int64_t Offset;
7378 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7379 parseComma() || parseImmExpr(Offset))
7380 return true;
7381 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7382 return false;
7383}
7384
7385/// parseDirectiveSEHSaveRegP
7386/// ::= .seh_save_regp
7387bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7388 unsigned Reg;
7389 int64_t Offset;
7390 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7391 parseComma() || parseImmExpr(Offset))
7392 return true;
7393 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7394 return false;
7395}
7396
7397/// parseDirectiveSEHSaveRegPX
7398/// ::= .seh_save_regp_x
7399bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7400 unsigned Reg;
7401 int64_t Offset;
7402 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7403 parseComma() || parseImmExpr(Offset))
7404 return true;
7405 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7406 return false;
7407}
7408
7409/// parseDirectiveSEHSaveLRPair
7410/// ::= .seh_save_lrpair
7411bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7412 unsigned Reg;
7413 int64_t Offset;
7414 L = getLoc();
7415 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7416 parseComma() || parseImmExpr(Offset))
7417 return true;
7418 if (check(((Reg - 19) % 2 != 0), L,
7419 "expected register with even offset from x19"))
7420 return true;
7421 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7422 return false;
7423}
7424
7425/// parseDirectiveSEHSaveFReg
7426/// ::= .seh_save_freg
7427bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7428 unsigned Reg;
7429 int64_t Offset;
7430 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7431 parseComma() || parseImmExpr(Offset))
7432 return true;
7433 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7434 return false;
7435}
7436
7437/// parseDirectiveSEHSaveFRegX
7438/// ::= .seh_save_freg_x
7439bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7440 unsigned Reg;
7441 int64_t Offset;
7442 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7443 parseComma() || parseImmExpr(Offset))
7444 return true;
7445 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7446 return false;
7447}
7448
7449/// parseDirectiveSEHSaveFRegP
7450/// ::= .seh_save_fregp
7451bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7452 unsigned Reg;
7453 int64_t Offset;
7454 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7455 parseComma() || parseImmExpr(Offset))
7456 return true;
7457 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7458 return false;
7459}
7460
7461/// parseDirectiveSEHSaveFRegPX
7462/// ::= .seh_save_fregp_x
7463bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7464 unsigned Reg;
7465 int64_t Offset;
7466 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7467 parseComma() || parseImmExpr(Offset))
7468 return true;
7469 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7470 return false;
7471}
7472
7473/// parseDirectiveSEHSetFP
7474/// ::= .seh_set_fp
7475bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7476 getTargetStreamer().emitARM64WinCFISetFP();
7477 return false;
7478}
7479
7480/// parseDirectiveSEHAddFP
7481/// ::= .seh_add_fp
7482bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7483 int64_t Size;
7484 if (parseImmExpr(Size))
7485 return true;
7486 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7487 return false;
7488}
7489
7490/// parseDirectiveSEHNop
7491/// ::= .seh_nop
7492bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7493 getTargetStreamer().emitARM64WinCFINop();
7494 return false;
7495}
7496
7497/// parseDirectiveSEHSaveNext
7498/// ::= .seh_save_next
7499bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7500 getTargetStreamer().emitARM64WinCFISaveNext();
7501 return false;
7502}
7503
7504/// parseDirectiveSEHEpilogStart
7505/// ::= .seh_startepilogue
7506bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7507 getTargetStreamer().emitARM64WinCFIEpilogStart();
7508 return false;
7509}
7510
7511/// parseDirectiveSEHEpilogEnd
7512/// ::= .seh_endepilogue
7513bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7514 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7515 return false;
7516}
7517
7518/// parseDirectiveSEHTrapFrame
7519/// ::= .seh_trap_frame
7520bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7521 getTargetStreamer().emitARM64WinCFITrapFrame();
7522 return false;
7523}
7524
7525/// parseDirectiveSEHMachineFrame
7526/// ::= .seh_pushframe
7527bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7528 getTargetStreamer().emitARM64WinCFIMachineFrame();
7529 return false;
7530}
7531
7532/// parseDirectiveSEHContext
7533/// ::= .seh_context
7534bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7535 getTargetStreamer().emitARM64WinCFIContext();
7536 return false;
7537}
7538
7539/// parseDirectiveSEHECContext
7540/// ::= .seh_ec_context
7541bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7542 getTargetStreamer().emitARM64WinCFIECContext();
7543 return false;
7544}
7545
7546/// parseDirectiveSEHClearUnwoundToCall
7547/// ::= .seh_clear_unwound_to_call
7548bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7549 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7550 return false;
7551}
7552
7553/// parseDirectiveSEHPACSignLR
7554/// ::= .seh_pac_sign_lr
7555bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7556 getTargetStreamer().emitARM64WinCFIPACSignLR();
7557 return false;
7558}
7559
7560/// parseDirectiveSEHSaveAnyReg
7561/// ::= .seh_save_any_reg
7562/// ::= .seh_save_any_reg_p
7563/// ::= .seh_save_any_reg_x
7564/// ::= .seh_save_any_reg_px
7565bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7566 bool Writeback) {
7568 SMLoc Start, End;
7569 int64_t Offset;
7570 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7571 parseComma() || parseImmExpr(Offset))
7572 return true;
7573
7574 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7575 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7576 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7577 return Error(L, "invalid save_any_reg offset");
7578 unsigned EncodedReg;
7579 if (Reg == AArch64::FP)
7580 EncodedReg = 29;
7581 else if (Reg == AArch64::LR)
7582 EncodedReg = 30;
7583 else
7584 EncodedReg = Reg - AArch64::X0;
7585 if (Paired) {
7586 if (Reg == AArch64::LR)
7587 return Error(Start, "lr cannot be paired with another register");
7588 if (Writeback)
7589 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7590 else
7591 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7592 } else {
7593 if (Writeback)
7594 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7595 else
7596 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7597 }
7598 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7599 unsigned EncodedReg = Reg - AArch64::D0;
7600 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7601 return Error(L, "invalid save_any_reg offset");
7602 if (Paired) {
7603 if (Reg == AArch64::D31)
7604 return Error(Start, "d31 cannot be paired with another register");
7605 if (Writeback)
7606 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7607 else
7608 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7609 } else {
7610 if (Writeback)
7611 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7612 else
7613 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7614 }
7615 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7616 unsigned EncodedReg = Reg - AArch64::Q0;
7617 if (Offset < 0 || Offset % 16)
7618 return Error(L, "invalid save_any_reg offset");
7619 if (Paired) {
7620 if (Reg == AArch64::Q31)
7621 return Error(Start, "q31 cannot be paired with another register");
7622 if (Writeback)
7623 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7624 else
7625 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7626 } else {
7627 if (Writeback)
7628 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7629 else
7630 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7631 }
7632 } else {
7633 return Error(Start, "save_any_reg register must be x, q or d register");
7634 }
7635 return false;
7636}
7637
7638bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7639 // Try @AUTH expressions: they're more complex than the usual symbol variants.
7640 if (!parseAuthExpr(Res, EndLoc))
7641 return false;
7642 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
7643}
7644
7645/// parseAuthExpr
7646/// ::= _sym@AUTH(ib,123[,addr])
7647/// ::= (_sym + 5)@AUTH(ib,123[,addr])
7648/// ::= (_sym - 5)@AUTH(ib,123[,addr])
7649bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7650 MCAsmParser &Parser = getParser();
7651 MCContext &Ctx = getContext();
7652
7653 AsmToken Tok = Parser.getTok();
7654
7655 // Look for '_sym@AUTH' ...
7656 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
7657 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
7658 if (SymName.contains('@'))
7659 return TokError(
7660 "combination of @AUTH with other modifiers not supported");
7661 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7662
7663 Parser.Lex(); // Eat the identifier.
7664 } else {
7665 // ... or look for a more complex symbol reference, such as ...
7667
7668 // ... '"_long sym"@AUTH' ...
7669 if (Tok.is(AsmToken::String))
7670 Tokens.resize(2);
7671 // ... or '(_sym + 5)@AUTH'.
7672 else if (Tok.is(AsmToken::LParen))
7673 Tokens.resize(6);
7674 else
7675 return true;
7676
7677 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
7678 return true;
7679
7680 // In either case, the expression ends with '@' 'AUTH'.
7681 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
7682 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
7683 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
7684 return true;
7685
7686 if (Tok.is(AsmToken::String)) {
7687 StringRef SymName;
7688 if (Parser.parseIdentifier(SymName))
7689 return true;
7690 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7691 } else {
7692 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
7693 return true;
7694 }
7695
7696 Parser.Lex(); // '@'
7697 Parser.Lex(); // 'AUTH'
7698 }
7699
7700 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
7701 if (parseToken(AsmToken::LParen, "expected '('"))
7702 return true;
7703
7704 if (Parser.getTok().isNot(AsmToken::Identifier))
7705 return TokError("expected key name");
7706
7707 StringRef KeyStr = Parser.getTok().getIdentifier();
7708 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
7709 if (!KeyIDOrNone)
7710 return TokError("invalid key '" + KeyStr + "'");
7711 Parser.Lex();
7712
7713 if (parseToken(AsmToken::Comma, "expected ','"))
7714 return true;
7715
7716 if (Parser.getTok().isNot(AsmToken::Integer))
7717 return TokError("expected integer discriminator");
7718 int64_t Discriminator = Parser.getTok().getIntVal();
7719
7720 if (!isUInt<16>(Discriminator))
7721 return TokError("integer discriminator " + Twine(Discriminator) +
7722 " out of range [0, 0xFFFF]");
7723 Parser.Lex();
7724
7725 bool UseAddressDiversity = false;
7726 if (Parser.getTok().is(AsmToken::Comma)) {
7727 Parser.Lex();
7728 if (Parser.getTok().isNot(AsmToken::Identifier) ||
7729 Parser.getTok().getIdentifier() != "addr")
7730 return TokError("expected 'addr'");
7731 UseAddressDiversity = true;
7732 Parser.Lex();
7733 }
7734
7735 EndLoc = Parser.getTok().getEndLoc();
7736 if (parseToken(AsmToken::RParen, "expected ')'"))
7737 return true;
7738
7739 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
7740 UseAddressDiversity, Ctx);
7741 return false;
7742}
7743
/// Classify an expression operand as a symbol reference, extracting any
/// AArch64-specific (ELF) modifier, any Darwin modifier, and a constant
/// addend. Returns true if Expr is a usable symbolic reference, false if it
/// is not symbolic or mixes Darwin and ELF modifier syntax.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64 modifier wrapper (e.g. ":lo12:") if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7785
7786/// Force static initialization.
7793}
7794
7795#define GET_REGISTER_MATCHER
7796#define GET_SUBTARGET_FEATURE_NAME
7797#define GET_MATCHER_IMPLEMENTATION
7798#define GET_MNEMONIC_SPELL_CHECKER
7799#include "AArch64GenAsmMatcher.inc"
7800
7801// Define this matcher function after the auto-generated include so we
7802// have the match class enum definitions.
7803unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
7804 unsigned Kind) {
7805 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
7806
7807 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
7808 if (!Op.isImm())
7809 return Match_InvalidOperand;
7810 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7811 if (!CE)
7812 return Match_InvalidOperand;
7813 if (CE->getValue() == ExpectedVal)
7814 return Match_Success;
7815 return Match_InvalidOperand;
7816 };
7817
7818 switch (Kind) {
7819 default:
7820 return Match_InvalidOperand;
7821 case MCK_MPR:
7822 // If the Kind is a token for the MPR register class which has the "za"
7823 // register (SME accumulator array), check if the asm is a literal "za"
7824 // token. This is for the "smstart za" alias that defines the register
7825 // as a literal token.
7826 if (Op.isTokenEqual("za"))
7827 return Match_Success;
7828 return Match_InvalidOperand;
7829
7830 // If the kind is a token for a literal immediate, check if our asm operand
7831 // matches. This is for InstAliases which have a fixed-value immediate in
7832 // the asm string, such as hints which are parsed into a specific
7833 // instruction definition.
7834#define MATCH_HASH(N) \
7835 case MCK__HASH_##N: \
7836 return MatchesOpImmediate(N);
7837 MATCH_HASH(0)
7838 MATCH_HASH(1)
7839 MATCH_HASH(2)
7840 MATCH_HASH(3)
7841 MATCH_HASH(4)
7842 MATCH_HASH(6)
7843 MATCH_HASH(7)
7844 MATCH_HASH(8)
7845 MATCH_HASH(10)
7846 MATCH_HASH(12)
7847 MATCH_HASH(14)
7848 MATCH_HASH(16)
7849 MATCH_HASH(24)
7850 MATCH_HASH(25)
7851 MATCH_HASH(26)
7852 MATCH_HASH(27)
7853 MATCH_HASH(28)
7854 MATCH_HASH(29)
7855 MATCH_HASH(30)
7856 MATCH_HASH(31)
7857 MATCH_HASH(32)
7858 MATCH_HASH(40)
7859 MATCH_HASH(48)
7860 MATCH_HASH(64)
7861#undef MATCH_HASH
7862#define MATCH_HASH_MINUS(N) \
7863 case MCK__HASH__MINUS_##N: \
7864 return MatchesOpImmediate(-N);
7868#undef MATCH_HASH_MINUS
7869 }
7870}
7871
/// Parse a consecutive even/odd GPR pair operand (e.g. "x0, x1"), as used by
/// CASP-style instructions. Both registers must be the same size (both w- or
/// both x-registers), the first must have an even encoding and the second
/// must be the next encoding up; the pair is pushed as a single sequential
/// super-register operand.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(FirstReg);
  if (!Res.isSuccess())
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's hardware encoding must be even.
  if (FirstEncoding & 0x1)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  if (getTok().isNot(AsmToken::Comma))
    return Error(getLoc(), "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (!Res.isSuccess())
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // The second register must be the next encoding and the same width.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg)))
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // Map the first register to the covering sequential-pair super-register.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
                                               getLoc(), getContext()));

  return ParseStatus::Success;
}
7935
7936template <bool ParseShiftExtend, bool ParseSuffix>
7937ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
7938 const SMLoc S = getLoc();
7939 // Check for a SVE vector register specifier first.
7940 MCRegister RegNum;
7942
7943 ParseStatus Res =
7944 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7945
7946 if (!Res.isSuccess())
7947 return Res;
7948
7949 if (ParseSuffix && Kind.empty())
7950 return ParseStatus::NoMatch;
7951
7952 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
7953 if (!KindRes)
7954 return ParseStatus::NoMatch;
7955
7956 unsigned ElementWidth = KindRes->second;
7957
7958 // No shift/extend is the default.
7959 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
7960 Operands.push_back(AArch64Operand::CreateVectorReg(
7961 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
7962
7963 ParseStatus Res = tryParseVectorIndex(Operands);
7964 if (Res.isFailure())
7965 return ParseStatus::Failure;
7966 return ParseStatus::Success;
7967 }
7968
7969 // Eat the comma
7970 Lex();
7971
7972 // Match the shift
7974 Res = tryParseOptionalShiftExtend(ExtOpnd);
7975 if (!Res.isSuccess())
7976 return Res;
7977
7978 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7979 Operands.push_back(AArch64Operand::CreateVectorReg(
7980 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7981 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7982 Ext->hasShiftExtendAmount()));
7983
7984 return ParseStatus::Success;
7985}
7986
/// Parse an SVE predicate pattern operand, either as an immediate
/// ("#<imm>") or as a named pattern (e.g. "vl4", "mul3", "all"), and push it
/// as a constant-immediate operand encoding the pattern.
ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return ParseStatus::NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return ParseStatus::Failure;

    // Only constant immediates are accepted as patterns.
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("invalid operand for instruction");

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return ParseStatus::Success;
}
8029
8031AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8032 int64_t Pattern;
8033 SMLoc SS = getLoc();
8034 const AsmToken &TokE = getTok();
8035 // Parse the pattern
8036 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8037 TokE.getString());
8038 if (!Pat)
8039 return ParseStatus::NoMatch;
8040
8041 Lex();
8042 Pattern = Pat->Encoding;
8043 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8044
8045 Operands.push_back(
8046 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8047 SS, getLoc(), getContext()));
8048
8049 return ParseStatus::Success;
8050}
8051
/// Parse an x-register that names a GPR64x8 tuple (eight consecutive
/// x-registers starting at an even register in [x0, x22]); pushes the
/// covering tuple super-register as the operand.
ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
  SMLoc SS = getLoc();

  MCRegister XReg;
  if (!tryParseScalarRegister(XReg).isSuccess())
    return ParseStatus::NoMatch;

  // Map the scalar register to the 8-register tuple it starts; fails (0) if
  // the register cannot start a tuple.
  MCContext &ctx = getContext();
  const MCRegisterInfo *RI = ctx.getRegisterInfo();
  int X8Reg = RI->getMatchingSuperReg(
      XReg, AArch64::x8sub_0,
      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
  if (!X8Reg)
    return Error(SS,
                 "expected an even-numbered x-register in the range [x0,x22]");

  Operands.push_back(
      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
  return ParseStatus::Success;
}
8072
/// Parse an immediate range operand of the form "<first>:<last>" (e.g.
/// "0:1"), as used by SME2 multi-vector instructions, and push it as an
/// immediate-range operand.
ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Integer))
    return ParseStatus::NoMatch;

  // Require the "<int>:" shape before committing to this parse.
  if (getLexer().peekTok().isNot(AsmToken::Colon))
    return ParseStatus::NoMatch;

  const MCExpr *ImmF;
  if (getParser().parseExpression(ImmF))
    return ParseStatus::NoMatch;

  if (getTok().isNot(AsmToken::Colon))
    return ParseStatus::NoMatch;

  Lex(); // Eat ':'
  if (getTok().isNot(AsmToken::Integer))
    return ParseStatus::NoMatch;

  SMLoc E = getTok().getLoc();
  const MCExpr *ImmL;
  if (getParser().parseExpression(ImmL))
    return ParseStatus::NoMatch;

  // Both bounds start as Integer tokens, so the parsed expressions are
  // constant here (the casts would assert otherwise).
  unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
  unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();

  Operands.push_back(
      AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
  return ParseStatus::Success;
}
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(unsigned ZReg, unsigned Reg)
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (#elements, element-width) if Suffix is a valid vector kind.
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Given that RA is a live value
@ Default
Definition: DwarfDebug.cpp:87
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define check(cond)
static LVOptions Options
Definition: LVOptions.cpp:25
Live Register Matrix
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
unsigned Reg
#define T
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
APInt bitcastToAPInt() const
Definition: APFloat.h:1266
Class for arbitrary precision integers.
Definition: APInt.h:78
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:415
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:412
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Target independent representation for an assembler token.
Definition: MCAsmMacro.h:21
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
int64_t getIntVal() const
Definition: MCAsmMacro.h:115
bool isNot(TokenKind K) const
Definition: MCAsmMacro.h:83
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmMacro.h:110
bool is(TokenKind K) const
Definition: MCAsmMacro.h:82
SMLoc getEndLoc() const
Definition: MCAsmLexer.cpp:30
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition: MCAsmMacro.h:99
This class represents an Operation in the Expression.
Base class for user error types.
Definition: Error.h:355
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Container class for subtarget features.
constexpr size_t size() const
void UnLex(AsmToken const &Token)
Definition: MCAsmLexer.h:93
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:111
virtual size_t peekTokens(MutableArrayRef< AsmToken > Buf, bool ShouldSkipSpace=true)=0
Look ahead an arbitrary number of tokens.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:123
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc, AsmTypeInfo *TypeInfo)=0
Parse a primary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:40
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual MCAsmLexer & getLexer()=0
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
int64_t getValue() const
Definition: MCExpr.h:169
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:193
Context object for machine code objects.
Definition: MCContext.h:83
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:414
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:213
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm, const MCFixup *Fixup) const
Try to evaluate the expression to a relocatable value, i.e.
Definition: MCExpr.cpp:788
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
unsigned getNumOperands() const
Definition: MCInst.h:208
void setLoc(SMLoc loc)
Definition: MCInst.h:203
unsigned getOpcode() const
Definition: MCInst.h:198
void addOperand(const MCOperand Op)
Definition: MCInst.h:210
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:134
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:162
int64_t getImm() const
Definition: MCInst.h:80
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:141
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Streaming machine code generation interface.
Definition: MCStreamer.h:213
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:309
Generic base class for all target subtargets.
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:188
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:393
VariantKind getKind() const
Definition: MCExpr.h:408
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc)
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
void setAvailableFeatures(const FeatureBitset &Value)
const MCSubtargetInfo & getSTI() const
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Definition: MCStreamer.h:94
This represents an "assembler immediate".
Definition: MCValue.h:36
int64_t getConstant() const
Definition: MCValue.h:43
const MCSymbolRefExpr * getSymB() const
Definition: MCValue.h:45
const MCSymbolRefExpr * getSymA() const
Definition: MCValue.h:44
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
Represents a location in source code.
Definition: SMLoc.h:23
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:36
constexpr const char * getPointer() const
Definition: SMLoc.h:34
Represents a range in source code.
Definition: SMLoc.h:48
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition: SmallSet.h:236
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:95
size_t size() const
Definition: SmallVector.h:92
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:587
void resize(size_type N)
Definition: SmallVector.h:652
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
iterator end()
Definition: StringMap.h:220
iterator find(StringRef Key)
Definition: StringMap.h:233
void erase(iterator I)
Definition: StringMap.h:416
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition: StringMap.h:308
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:685
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:455
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:594
std::string upper() const
Convert the given ASCII string to uppercase.
Definition: StringRef.cpp:116
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:409
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition: StringRef.h:574
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:800
std::string lower() const
Definition: StringRef.cpp:111
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:262
static constexpr size_t npos
Definition: StringRef.h:52
StringRef drop_back(size_t N=1) const
Return a StringRef equal to 'this' but with the last N elements dropped.
Definition: StringRef.h:601
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition: StringRef.h:163
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:390
bool isWindowsArm64EC() const
Definition: Triple.h:640
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCode getInvertedCondCode(CondCode Code)
uint32_t parseGenericRegister(StringRef Name)
const SysReg * lookupSysRegByName(StringRef)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
const ArchInfo * parseArch(StringRef Arch)
const ArchInfo * getArchForCpu(StringRef CPU)
bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Entry
Definition: COFF.h:826
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1603
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
@ SS
Definition: X86.h:211
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:47
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
Format
The format used for serializing/deserializing remarks.
Definition: RemarkFormat.h:25
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition: Error.h:1099
@ Offset
Definition: DWP.cpp:480
@ Length
Definition: DWP.cpp:480
static int MCLOHNameToId(StringRef Name)
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:170
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
static bool isValidMCLOHType(unsigned Kind)
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
static unsigned getXRegFromWReg(unsigned Reg)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:340
Target & getTheAArch64_32Target()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
static int MCLOHIdToNbArgs(MCLOHType Kind)
MCLOHType
Linker Optimization Hint Type.
static unsigned getWRegFromXReg(unsigned Reg)
Target & getTheARM64Target()
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886
#define N
const FeatureBitset Features
const char * Name
A record for a potential prefetch made during the initial scan of the loop.
AArch64::ExtensionBitset DefaultExts
Description of the encoding of one expression Op.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name
FeatureBitset FeaturesRequired