LLVM 23.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
// Kinds of registers the parser distinguishes; keys register-name lookup and
// the .req alias table (see RegisterReqs below).
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
// How a parsed register may relate to the register class the matcher expects:
// exactly the same register, or its super-/sub-register (used for operands
// such as GPR64as32 / GPR32as64 — see RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E);
186 bool parseDataExpr(const MCExpr *&Res) override;
187 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
188
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
193
194 bool parseDirectiveTLSDescCall(SMLoc L);
195
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
198
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
205
206 bool parseDirectiveVariantPCS(SMLoc L);
207
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
240
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
245 OperandVector &Operands, MCStreamer &Out,
246 uint64_t &ErrorInfo,
247 bool MatchingInlineAsm) override;
248 /// @name Auto-generated Match Functions
249 /// {
250
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
253
254 /// }
255
256 ParseStatus tryParseScalarRegister(MCRegister &Reg);
257 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
258 RegKind MatchKind);
259 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
260 ParseStatus tryParseSVCR(OperandVector &Operands);
261 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
262 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
263 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
264 ParseStatus tryParseSysReg(OperandVector &Operands);
265 ParseStatus tryParseSysCROperand(OperandVector &Operands);
266 template <bool IsSVEPrefetch = false>
267 ParseStatus tryParsePrefetch(OperandVector &Operands);
268 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
269 ParseStatus tryParsePSBHint(OperandVector &Operands);
270 ParseStatus tryParseBTIHint(OperandVector &Operands);
271 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
272 ParseStatus tryParseTIndexHint(OperandVector &Operands);
273 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
274 ParseStatus tryParseAdrLabel(OperandVector &Operands);
275 template <bool AddFPZeroAsLiteral>
276 ParseStatus tryParseFPImm(OperandVector &Operands);
277 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
278 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
279 bool tryParseNeonVectorRegister(OperandVector &Operands);
280 ParseStatus tryParseVectorIndex(OperandVector &Operands);
281 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
282 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
283 template <bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
285 ParseStatus tryParseGPROperand(OperandVector &Operands);
286 ParseStatus tryParseZTOperand(OperandVector &Operands);
287 template <bool ParseShiftExtend, bool ParseSuffix>
288 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
289 template <RegKind RK>
290 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
292 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
293 template <RegKind VectorKind>
294 ParseStatus tryParseVectorList(OperandVector &Operands,
295 bool ExpectMatch = false);
296 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
297 ParseStatus tryParseSVEPattern(OperandVector &Operands);
298 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
299 ParseStatus tryParseGPR64x8(OperandVector &Operands);
300 ParseStatus tryParseImmRange(OperandVector &Operands);
301 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
302 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
303
304public:
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
309 };
310 bool IsILP32;
311 bool IsWindowsArm64EC;
312
313 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII, const MCTargetOptions &Options)
315 : MCTargetAsmParser(Options, STI, MII) {
316 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
317 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
319 MCStreamer &S = getParser().getStreamer();
320 if (S.getTargetStreamer() == nullptr)
321 new AArch64TargetStreamer(S);
322
323 // Alias .hword/.word/.[dx]word to the target-independent
324 // .2byte/.4byte/.8byte directives as they have the same form and
325 // semantics:
326 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
327 Parser.addAliasForDirective(".hword", ".2byte");
328 Parser.addAliasForDirective(".word", ".4byte");
329 Parser.addAliasForDirective(".dword", ".8byte");
330 Parser.addAliasForDirective(".xword", ".8byte");
331
332 // Initialize the set of available features.
333 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
334 }
335
336 bool areEqualRegs(const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2) const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
339 SMLoc NameLoc, OperandVector &Operands) override;
340 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
341 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc) override;
343 bool ParseDirective(AsmToken DirectiveID) override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
345 unsigned Kind) override;
346
347 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
348 AArch64::Specifier &DarwinSpec,
349 int64_t &Addend);
350};
351
352/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
353/// instruction.
354class AArch64Operand : public MCParsedAsmOperand {
355private:
  // Discriminator for the operand payload union below; exactly one union
  // member is active at a time, selected by this tag.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
    k_CMHPriorityHint,
    k_TIndexHint,
  } Kind;

  // Source range of the operand as written in the assembly text.
  SMLoc StartLoc, EndLoc;
382
383 struct TokOp {
384 const char *Data;
385 unsigned Length;
386 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
387 };
388
389 // Separate shift/extend operand.
390 struct ShiftExtendOp {
392 unsigned Amount;
393 bool HasExplicitAmount;
394 };
395
396 struct RegOp {
397 MCRegister Reg;
398 RegKind Kind;
399 int ElementWidth;
400
401 // The register may be allowed as a different register class,
402 // e.g. for GPR64as32 or GPR32as64.
403 RegConstraintEqualityTy EqualityTy;
404
405 // In some cases the shift/extend needs to be explicitly parsed together
406 // with the register, rather than as a separate operand. This is needed
407 // for addressing modes where the instruction as a whole dictates the
408 // scaling/extend, rather than specific bits in the instruction.
409 // By parsing them as a single operand, we avoid the need to pass an
410 // extra operand in all CodeGen patterns (because all operands need to
411 // have an associated value), and we avoid the need to update TableGen to
412 // accept operands that have no associated bits in the instruction.
413 //
414 // An added benefit of parsing them together is that the assembler
415 // can give a sensible diagnostic if the scaling is not correct.
416 //
417 // The default is 'lsl #0' (HasExplicitAmount = false) if no
418 // ShiftExtend is specified.
419 ShiftExtendOp ShiftExtend;
420 };
421
422 struct MatrixRegOp {
423 MCRegister Reg;
424 unsigned ElementWidth;
425 MatrixKind Kind;
426 };
427
428 struct MatrixTileListOp {
429 unsigned RegMask = 0;
430 };
431
432 struct VectorListOp {
433 MCRegister Reg;
434 unsigned Count;
435 unsigned Stride;
436 unsigned NumElements;
437 unsigned ElementWidth;
438 RegKind RegisterKind;
439 };
440
441 struct VectorIndexOp {
442 int Val;
443 };
444
445 struct ImmOp {
446 const MCExpr *Val;
447 };
448
449 struct ShiftedImmOp {
450 const MCExpr *Val;
451 unsigned ShiftAmount;
452 };
453
454 struct ImmRangeOp {
455 unsigned First;
456 unsigned Last;
457 };
458
459 struct CondCodeOp {
461 };
462
463 struct FPImmOp {
464 uint64_t Val; // APFloat value bitcasted to uint64_t.
465 bool IsExact; // describes whether parsed value was exact.
466 };
467
468 struct BarrierOp {
469 const char *Data;
470 unsigned Length;
471 unsigned Val; // Not the enum since not all values have names.
472 bool HasnXSModifier;
473 };
474
475 struct SysRegOp {
476 const char *Data;
477 unsigned Length;
478 uint32_t MRSReg;
479 uint32_t MSRReg;
480 uint32_t PStateField;
481 };
482
483 struct SysCRImmOp {
484 unsigned Val;
485 };
486
487 struct PrefetchOp {
488 const char *Data;
489 unsigned Length;
490 unsigned Val;
491 };
492
493 struct PSBHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498 struct PHintOp {
499 const char *Data;
500 unsigned Length;
501 unsigned Val;
502 };
503 struct BTIHintOp {
504 const char *Data;
505 unsigned Length;
506 unsigned Val;
507 };
508 struct CMHPriorityHintOp {
509 const char *Data;
510 unsigned Length;
511 unsigned Val;
512 };
513 struct TIndexHintOp {
514 const char *Data;
515 unsigned Length;
516 unsigned Val;
517 };
518
519 struct SVCROp {
520 const char *Data;
521 unsigned Length;
522 unsigned PStateField;
523 };
524
525 union {
526 struct TokOp Tok;
527 struct RegOp Reg;
528 struct MatrixRegOp MatrixReg;
529 struct MatrixTileListOp MatrixTileList;
530 struct VectorListOp VectorList;
531 struct VectorIndexOp VectorIndex;
532 struct ImmOp Imm;
533 struct ShiftedImmOp ShiftedImm;
534 struct ImmRangeOp ImmRange;
535 struct CondCodeOp CondCode;
536 struct FPImmOp FPImm;
537 struct BarrierOp Barrier;
538 struct SysRegOp SysReg;
539 struct SysCRImmOp SysCRImm;
540 struct PrefetchOp Prefetch;
541 struct PSBHintOp PSBHint;
542 struct PHintOp PHint;
543 struct BTIHintOp BTIHint;
544 struct CMHPriorityHintOp CMHPriorityHint;
545 struct TIndexHintOp TIndexHint;
546 struct ShiftExtendOp ShiftExtend;
547 struct SVCROp SVCR;
548 };
549
  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  // Constructs an operand of kind K; the matching union payload is filled in
  // afterwards by the caller.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor: copies only the union member that is active for o.Kind,
  // since reading any other member of the union would be undefined.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      break;
    case k_TIndexHint:
      TIndexHint = o.TIndexHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
630
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// Spelling of a k_Token operand.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  /// True if this token is a suffix of the mnemonic rather than a separate
  /// operand in the source.
  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  /// Immediate expression of a k_Immediate operand.
  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  /// Unshifted value of a k_ShiftedImm operand.
  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  /// Shift amount of a k_ShiftedImm operand.
  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  /// First value of a k_ImmRange operand.
  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  /// Last value of a k_ImmRange operand.
  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }
670
672 assert(Kind == k_CondCode && "Invalid access!");
673 return CondCode.Code;
674 }
675
  /// FP immediate, reconstructed from the stored 64-bit bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  /// Whether the parsed FP literal was representable exactly.
  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  /// Raw barrier encoding (not the enum, since not all values have names).
  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  /// Whether the barrier carried an nXS modifier.
  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.Reg;
  }

  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }
720
  /// Bitmask of ZA tiles named in a matrix tile list.
  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  /// How this register may relate to the expected register class
  /// (equal / super-register / sub-register).
  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  /// First register of a k_VectorList operand.
  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  /// Textual name of a system register operand.
  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  /// CRn/CRm immediate of a k_SysCR operand.
  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;
  }

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
  }

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;
  }

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }
825
  // The shift/extend accessors work for both a standalone k_ShiftExtend
  // operand and a k_Register operand that carries an embedded shift/extend.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  /// Whether an explicit amount was written in the source (vs. the implicit
  /// default of 'lsl #0').
  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
849
  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands are never classified as bare memory operands; addressing
  // is modeled with separate register and shift/extend operands.
  bool isMem() const override { return false; }
852
853 bool isUImm6() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859 int64_t Val = MCE->getValue();
860 return (Val >= 0 && Val < 64);
861 }
862
  /// Signed immediate of the given bit width (unscaled).
  template <int Width> bool isSImm() const {
    return bool(isSImmScaled<Width, 1>());
  }

  /// Signed immediate that must be a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  /// Unsigned (optionally ranged) immediate that must be a multiple of Scale.
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      // NOTE(review): a return statement (DiagnosticPredicate no-match)
      // appears to have been lost in extraction here — compare upstream.

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
879
  /// Core range/scale check shared by the signed and unsigned predicates:
  /// accepts a plain immediate (or the first value of a range when IsRange)
  /// that is a multiple of Scale within the Bits-wide signed/unsigned range.
  // NOTE(review): several DiagnosticPredicate return statements in this
  // function appear to have been lost in extraction — compare upstream.
  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
      Val = MCE->getValue();
    }

    // Compute the inclusive [MinVal, MaxVal] bounds for Bits-wide values
    // scaled by Scale.
    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)

  }

  /// SVE predicate-pattern immediate: constant in [0, 32).
  // NOTE(review): the return statements appear to have been lost in
  // extraction — compare upstream.
  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
  }

  /// SVE vector-length specifier: constant 0 or 1.
  // NOTE(review): the return statements appear to have been lost in
  // extraction — compare upstream.
  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
  }
935
  /// Whether a symbolic expression is acceptable as a uimm12 load/store
  /// offset (page-offset style relocation specifiers).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // NOTE(review): the middle of this condition (presumably several ELF
    // lo12-style specifier comparisons) appears to have been lost in
    // extraction — compare upstream.
    if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
        ELFSpec)) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
               DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  /// Scaled unsigned 12-bit load/store offset; symbolic expressions defer to
  /// isSymbolicUImm12Offset above.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
980
981 template <int N, int M>
982 bool isImmInRange() const {
983 if (!isImm())
984 return false;
985 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
986 if (!MCE)
987 return false;
988 int64_t Val = MCE->getValue();
989 return (Val >= N && Val <= M);
990 }
991
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    // (Two half-width shifts keep the shift count < 64 even when T is a
    // 64-bit type, where a single shift would be undefined behavior.)
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
1011
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }
1015
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    // Already-shifted immediate with exactly the requested shift amount.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain immediate: fold out the shift if the low Width bits are zero,
    // otherwise report it as unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
1036
  /// True if the operand can be used as an ADD/SUB immediate: either a
  /// constant that fits 12 bits with an 'lsl #0'/'lsl #12' shift, or a
  /// page-offset style symbolic reference resolved by a fixup.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                            Addend)) {
      // NOTE(review): several lines of this return expression (presumably
      // the ELF lo12-style specifier comparisons) appear to have been lost
      // in extraction — compare upstream.
      return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
             DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
             (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
             ELFSpec);
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1079
1080 bool isAddSubImmNeg() const {
1081 if (!isShiftedImm() && !isImm())
1082 return false;
1083
1084 // Otherwise it should be a real negative immediate in range.
1085 if (auto ShiftedVal = getShiftedVal<12>())
1086 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1087
1088 return false;
1089 }
1090
1091 // Signed value in the range -128 to +127. For element widths of
1092 // 16 bits or higher it may also be a signed multiple of 256 in the
1093 // range -32768 to +32512.
1094 // For element-width of 8 bits a range of -128 to 255 is accepted,
1095 // since a copy of a byte can be either signed/unsigned.
1096 template <typename T>
1097 DiagnosticPredicate isSVECpyImm() const {
1098 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1100
1101 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1102 std::is_same<int8_t, T>::value;
1103 if (auto ShiftedImm = getShiftedVal<8>())
1104 if (!(IsByte && ShiftedImm->second) &&
1105 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1106 << ShiftedImm->second))
1108
1110 }
1111
1112 // Unsigned value in the range 0 to 255. For element widths of
1113 // 16 bits or higher it may also be a signed multiple of 256 in the
1114 // range 0 to 65280.
1115 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1116 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1118
1119 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1120 std::is_same<int8_t, T>::value;
1121 if (auto ShiftedImm = getShiftedVal<8>())
1122 if (!(IsByte && ShiftedImm->second) &&
1123 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1124 << ShiftedImm->second))
1126
1128 }
1129
1130 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1131 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1134 }
1135
// True when this operand was parsed as a condition code (eq, ne, ...).
1136 bool isCondCode() const { return Kind == k_CondCode; }
1137
// True when the operand is a constant valid as an AdvSIMD modified-immediate
// "type 10" (64-bit byte-mask) value.
// NOTE(review): the final return (original line 1144, presumably a call to
// AArch64_AM::isAdvSIMDModImmType10) is elided in this rendering.
1138 bool isSIMDImmType10() const {
1139 if (!isImm())
1140 return false;
1141 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1142 if (!MCE)
1143 return false;
1145 }
1146
1147 template<int N>
1148 bool isBranchTarget() const {
1149 if (!isImm())
1150 return false;
1151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1152 if (!MCE)
1153 return true;
1154 int64_t Val = MCE->getValue();
1155 if (Val & 0x3)
1156 return false;
1157 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1158 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1159 }
1160
1161 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1162 if (!isImm())
1163 return false;
1164
1165 AArch64::Specifier ELFSpec;
1166 AArch64::Specifier DarwinSpec;
1167 int64_t Addend;
1168 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1169 Addend)) {
1170 return false;
1171 }
1172 if (DarwinSpec != AArch64::S_None)
1173 return false;
1174
1175 return llvm::is_contained(AllowedModifiers, ELFSpec);
1176 }
1177
// Predicates for the :abs_gN:/:prel_gN: style MOVW symbol modifiers, one per
// 16-bit chunk index (G3 = bits 48-63 ... G0 = bits 0-15).
// NOTE(review): the G2/G1/G0 modifier lists are truncated in this rendering
// (original lines 1184-1186, 1191-1194, 1199-1202 elided).
1178 bool isMovWSymbolG3() const {
1179 return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1180 }
1181
1182 bool isMovWSymbolG2() const {
1183 return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1187 }
1188
1189 bool isMovWSymbolG1() const {
1190 return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1195 }
1196
1197 bool isMovWSymbolG0() const {
1198 return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1203 }
1204
1205 template<int RegWidth, int Shift>
1206 bool isMOVZMovAlias() const {
1207 if (!isImm()) return false;
1208
1209 const MCExpr *E = getImm();
1210 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1211 uint64_t Value = CE->getValue();
1212
1213 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1214 }
1215 // Only supports the case of Shift being 0 if an expression is used as an
1216 // operand
1217 return !Shift && E;
1218 }
1219
1220 template<int RegWidth, int Shift>
1221 bool isMOVNMovAlias() const {
1222 if (!isImm()) return false;
1223
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1225 if (!CE) return false;
1226 uint64_t Value = CE->getValue();
1227
1228 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1229 }
1230
// Simple operand-kind predicates: each checks the discriminator (Kind) and,
// where relevant, membership of the held register in a specific register
// class or an auxiliary field of the operand.
1231 bool isFPImm() const {
1232 return Kind == k_FPImm &&
1233 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1234 }
1235
1236 bool isBarrier() const {
1237 return Kind == k_Barrier && !getBarriernXSModifier();
1238 }
1239 bool isBarriernXS() const {
1240 return Kind == k_Barrier && getBarriernXSModifier();
1241 }
1242 bool isSysReg() const { return Kind == k_SysReg; }
1243
1244 bool isMRSSystemRegister() const {
1245 if (!isSysReg()) return false;
1246
1247 return SysReg.MRSReg != -1U;
1248 }
1249
1250 bool isMSRSystemRegister() const {
1251 if (!isSysReg()) return false;
1252 return SysReg.MSRReg != -1U;
1253 }
1254
1255 bool isSystemPStateFieldWithImm0_1() const {
1256 if (!isSysReg()) return false;
1257 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1258 }
1259
1260 bool isSystemPStateFieldWithImm0_15() const {
1261 if (!isSysReg())
1262 return false;
1263 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1264 }
1265
1266 bool isSVCR() const {
1267 if (Kind != k_SVCR)
1268 return false;
1269 return SVCR.PStateField != -1U;
1270 }
1271
1272 bool isReg() const override {
1273 return Kind == k_Register;
1274 }
1275
1276 bool isVectorList() const { return Kind == k_VectorList; }
1277
1278 bool isScalarReg() const {
1279 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1280 }
1281
1282 bool isNeonVectorReg() const {
1283 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1284 }
1285
// Restricted to the "lo" halves of the FP register files (V0-V15 / D0-D15),
// as required by some indexed-element instructions.
1286 bool isNeonVectorRegLo() const {
1287 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1288 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1289 Reg.Reg) ||
1290 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1291 Reg.Reg));
1292 }
1293
1294 bool isNeonVectorReg0to7() const {
1295 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1296 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1297 Reg.Reg));
1298 }
1299
1300 bool isMatrix() const { return Kind == k_MatrixRegister; }
1301 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1302
1303 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1304 RegKind RK;
1305 switch (Class) {
1306 case AArch64::PPRRegClassID:
1307 case AArch64::PPR_3bRegClassID:
1308 case AArch64::PPR_p8to15RegClassID:
1309 case AArch64::PNRRegClassID:
1310 case AArch64::PNR_p8to15RegClassID:
1311 case AArch64::PPRorPNRRegClassID:
1312 RK = RegKind::SVEPredicateAsCounter;
1313 break;
1314 default:
1315 llvm_unreachable("Unsupported register class");
1316 }
1317
1318 return (Kind == k_Register && Reg.Kind == RK) &&
1319 AArch64MCRegisterClasses[Class].contains(getReg());
1320 }
1321
1322 template <unsigned Class> bool isSVEVectorReg() const {
1323 RegKind RK;
1324 switch (Class) {
1325 case AArch64::ZPRRegClassID:
1326 case AArch64::ZPR_3bRegClassID:
1327 case AArch64::ZPR_4bRegClassID:
1328 case AArch64::ZPRMul2_LoRegClassID:
1329 case AArch64::ZPRMul2_HiRegClassID:
1330 case AArch64::ZPR_KRegClassID:
1331 RK = RegKind::SVEDataVector;
1332 break;
1333 case AArch64::PPRRegClassID:
1334 case AArch64::PPR_3bRegClassID:
1335 case AArch64::PPR_p8to15RegClassID:
1336 case AArch64::PNRRegClassID:
1337 case AArch64::PNR_p8to15RegClassID:
1338 case AArch64::PPRorPNRRegClassID:
1339 RK = RegKind::SVEPredicateVector;
1340 break;
1341 default:
1342 llvm_unreachable("Unsupported register class");
1343 }
1344
1345 return (Kind == k_Register && Reg.Kind == RK) &&
1346 AArch64MCRegisterClasses[Class].contains(getReg());
1347 }
1348
// FP scalar register reinterpreted as an SVE Z register (e.g. d0 -> z0).
1349 template <unsigned Class> bool isFPRasZPR() const {
1350 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1351 AArch64MCRegisterClasses[Class].contains(getReg());
1352 }
1353
// Width-qualified SVE register predicates. Each returns a three-valued
// DiagnosticPredicate (no-match / near-match / match) so the matcher can
// emit a targeted diagnostic on a wrong element width.
// NOTE(review): the DiagnosticPredicate return statements are elided in
// this rendering; only the tested conditions are visible.
1354 template <int ElementWidth, unsigned Class>
1355 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1356 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1358
1359 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1361
1363 }
1364
1365 template <int ElementWidth, unsigned Class>
1366 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1367 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1368 Reg.Kind != RegKind::SVEPredicateVector))
1370
1371 if ((isSVEPredicateAsCounterReg<Class>() ||
1372 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1373 Reg.ElementWidth == ElementWidth)
1375
1377 }
1378
1379 template <int ElementWidth, unsigned Class>
1380 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1381 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1383
1384 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1386
1388 }
1389
1390 template <int ElementWidth, unsigned Class>
1391 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1392 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1394
1395 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1397
1399 }
1400
1401 template <int ElementWidth, unsigned Class,
1402 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1403 bool ShiftWidthAlwaysSame>
1404 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1405 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1406 if (!VectorMatch.isMatch())
1408
1409 // Give a more specific diagnostic when the user has explicitly typed in
1410 // a shift-amount that does not match what is expected, but for which
1411 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1412 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1413 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1414 ShiftExtendTy == AArch64_AM::SXTW) &&
1415 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1417
1418 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1420
1422 }
1423
// Cross-width GPR predicates: the operand was written with one register
// width but the instruction encodes the other (e.g. "w0" spelled as "x0"),
// plus the sequential-pair classes used by CASP-style instructions.
1424 bool isGPR32as64() const {
1425 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1426 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
1427 }
1428
1429 bool isGPR64as32() const {
1430 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1431 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
1432 }
1433
1434 bool isGPR64x8() const {
1435 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1436 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1437 Reg.Reg);
1438 }
1439
1440 bool isWSeqPair() const {
1441 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1442 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1443 Reg.Reg);
1444 }
1445
1446 bool isXSeqPair() const {
1447 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1448 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1449 Reg.Reg);
1450 }
1451
// SYSP only accepts XZR in this position.
1452 bool isSyspXzrPair() const {
1453 return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
1454 }
1455
// Complex-number rotation immediate: accepts values <= 270 that are
// congruent to Remainder modulo Angle (e.g. 0/90/180/270).
// NOTE(review): the DiagnosticPredicate return statements are elided in
// this rendering.
1456 template<int64_t Angle, int64_t Remainder>
1457 DiagnosticPredicate isComplexRotation() const {
1458 if (!isImm())
1460
1461 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1462 if (!CE)
1464 uint64_t Value = CE->getValue();
1465
1466 if (Value % Angle == Remainder && Value <= 270)
1469 }
1470
// Scalar 64-bit GPR constrained to a specific register class.
1471 template <unsigned RegClassID> bool isGPR64() const {
1472 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1473 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1474 }
1475
// 64-bit GPR with an 'lsl #log2(ExtWidth/8)' shift-extend attached.
1476 template <unsigned RegClassID, int ExtWidth>
1477 DiagnosticPredicate isGPR64WithShiftExtend() const {
1478 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1480
1481 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1482 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1485 }
1486
1487 /// Is this a vector list with the type implicit (presumably attached to the
1488 /// instruction itself)?
1489 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1490 bool isImplicitlyTypedVectorList() const {
1491 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1492 VectorList.NumElements == 0 &&
1493 VectorList.RegisterKind == VectorKind &&
1494 (!IsConsecutive || (VectorList.Stride == 1));
1495 }
1496
1497 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1498 unsigned ElementWidth, unsigned Stride = 1>
1499 bool isTypedVectorList() const {
1500 if (Kind != k_VectorList)
1501 return false;
1502 if (VectorList.Count != NumRegs)
1503 return false;
1504 if (VectorList.RegisterKind != VectorKind)
1505 return false;
1506 if (VectorList.ElementWidth != ElementWidth)
1507 return false;
1508 if (VectorList.Stride != Stride)
1509 return false;
1510 return VectorList.NumElements == NumElements;
1511 }
1512
// Vector-list refinements: class-constrained lists, strided lists (the
// start register must lie in the first Stride registers of its half of the
// Z file), and the generic vector-index range check.
// NOTE(review): the DiagnosticPredicate return statements are elided in
// this rendering.
1513 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1514 unsigned ElementWidth, unsigned RegClass>
1515 DiagnosticPredicate isTypedVectorListMultiple() const {
1516 bool Res =
1517 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1518 if (!Res)
1520 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
1523 }
1524
1525 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1526 unsigned ElementWidth>
1527 DiagnosticPredicate isTypedVectorListStrided() const {
1528 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1529 ElementWidth, Stride>();
1530 if (!Res)
1532 if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
1533 ((VectorList.Reg >= AArch64::Z16) &&
1534 (VectorList.Reg < (AArch64::Z16 + Stride))))
1537 }
1538
1539 template <int Min, int Max>
1540 DiagnosticPredicate isVectorIndex() const {
1541 if (Kind != k_VectorIndex)
1543 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1546 }
1547
// Token and hint-operand predicates, plus the generic shifter check.
1548 bool isToken() const override { return Kind == k_Token; }
1549
1550 bool isTokenEqual(StringRef Str) const {
1551 return Kind == k_Token && getToken() == Str;
1552 }
1553 bool isSysCR() const { return Kind == k_SysCR; }
1554 bool isPrefetch() const { return Kind == k_Prefetch; }
1555 bool isPSBHint() const { return Kind == k_PSBHint; }
1556 bool isPHint() const { return Kind == k_PHint; }
1557 bool isBTIHint() const { return Kind == k_BTIHint; }
1558 bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
1559 bool isTIndexHint() const { return Kind == k_TIndexHint; }
1560 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// A shifter is any of the plain shift operators (as opposed to the
// register-extend operators handled by isExtend()).
1561 bool isShifter() const {
1562 if (!isShiftExtend())
1563 return false;
1564
1565 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1566 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1567 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1568 ST == AArch64_AM::MSL);
1569 }
1570
// Matches an FP immediate against one entry of the exact-FP-immediate table
// (e.g. the #0.5/#1.0 operands of some SVE/NEON instructions). The parsed
// value must be bit-exact, not merely close.
// NOTE(review): the DiagnosticPredicate return statements are elided in
// this rendering.
1571 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1572 if (Kind != k_FPImm)
1574
1575 if (getFPImmIsExact()) {
1576 // Lookup the immediate from table of supported immediates.
1577 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1578 assert(Desc && "Unknown enum value");
1579
1580 // Calculate its FP value.
1581 APFloat RealVal(APFloat::IEEEdouble());
1582 auto StatusOrErr =
1583 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1584 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1585 llvm_unreachable("FP immediate is not exact");
1586
1587 if (getFPImm().bitwiseIsEqual(RealVal))
1589 }
1590
1592 }
1593
// Two-candidate variant: accept either of the table entries.
1594 template <unsigned ImmA, unsigned ImmB>
1595 DiagnosticPredicate isExactFPImm() const {
1596 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1597 if ((Res = isExactFPImm<ImmA>()))
1599 if ((Res = isExactFPImm<ImmB>()))
1601 return Res;
1602 }
1603
1604 bool isExtend() const {
1605 if (!isShiftExtend())
1606 return false;
1607
1608 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1609 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1610 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1611 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1612 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1613 ET == AArch64_AM::LSL) &&
1614 getShiftExtendAmount() <= 4;
1615 }
1616
// Refinements of isExtend()/isShiftExtend() used by specific addressing
// modes and instruction forms.
1617 bool isExtend64() const {
1618 if (!isExtend())
1619 return false;
1620 // Make sure the extend expects a 32-bit source register.
1621 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1622 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1623 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1624 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1625 }
1626
// 64-bit source extends: uxtx/sxtx or plain lsl, amount <= 4.
1627 bool isExtendLSL64() const {
1628 if (!isExtend())
1629 return false;
1630 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1631 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1632 ET == AArch64_AM::LSL) &&
1633 getShiftExtendAmount() <= 4;
1634 }
1635
// Plain LSL with a 3-bit amount (0-7).
1636 bool isLSLImm3Shift() const {
1637 if (!isShiftExtend())
1638 return false;
1639 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1640 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1641 }
1642
// Memory-operand extends for X (64-bit) index registers: amount must be 0
// or log2 of the access width in bytes.
1643 template<int Width> bool isMemXExtend() const {
1644 if (!isExtend())
1645 return false;
1646 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1647 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1648 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1649 getShiftExtendAmount() == 0);
1650 }
1651
// Memory-operand extends for W (32-bit) index registers.
1652 template<int Width> bool isMemWExtend() const {
1653 if (!isExtend())
1654 return false;
1655 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1656 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1657 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1658 getShiftExtendAmount() == 0);
1659 }
1660
// Shifter classes for arithmetic (ADD/SUB) and logical (AND/ORR/EOR)
// register operands; the amount must be strictly less than the register
// width.
1661 template <unsigned width>
1662 bool isArithmeticShifter() const {
1663 if (!isShifter())
1664 return false;
1665
1666 // An arithmetic shifter is LSL, LSR, or ASR.
1667 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1668 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1669 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1670 }
1671
1672 template <unsigned width>
1673 bool isLogicalShifter() const {
1674 if (!isShifter())
1675 return false;
1676
1677 // A logical shifter is LSL, LSR, ASR or ROR.
1678 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1679 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1680 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1681 getShiftExtendAmount() < width;
1682 }
1683
1684 bool isMovImm32Shifter() const {
1685 if (!isShifter())
1686 return false;
1687
// A 32-bit MOVi shifter is LSL of 0 or 16. (The previous comment listed
// 0/16/32/48, which belongs to the 64-bit variant below — it was swapped.)
1689 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1690 if (ST != AArch64_AM::LSL)
1691 return false;
1692 uint64_t Val = getShiftExtendAmount();
1693 return (Val == 0 || Val == 16);
1694 }
1695
1696 bool isMovImm64Shifter() const {
1697 if (!isShifter())
1698 return false;
1699
// A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. (The previous comment
// said "0 or 16", which belongs to the 32-bit variant — it was swapped.)
1701 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1702 if (ST != AArch64_AM::LSL)
1703 return false;
1704 uint64_t Val = getShiftExtendAmount();
1705 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1706 }
1707
1708 bool isLogicalVecShifter() const {
1709 if (!isShifter())
1710 return false;
1711
1712 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1713 unsigned Shift = getShiftExtendAmount();
1714 return getShiftExtendType() == AArch64_AM::LSL &&
1715 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1716 }
1717
1718 bool isLogicalVecHalfWordShifter() const {
1719 if (!isLogicalVecShifter())
1720 return false;
1721
// A half-word logical vector shifter is a left shift by 0 or 8.
1723 unsigned Shift = getShiftExtendAmount();
1724 return getShiftExtendType() == AArch64_AM::LSL &&
1725 (Shift == 0 || Shift == 8);
1726 }
1727
1728 bool isMoveVecShifter() const {
1729 if (!isShiftExtend())
1730 return false;
1731
// A move vector shifter is an MSL (shift-left-and-insert-ones) by 8 or 16.
// (The previous comment said "logical vector shifter" — copy-paste slip.)
1733 unsigned Shift = getShiftExtendAmount();
1734 return getShiftExtendType() == AArch64_AM::MSL &&
1735 (Shift == 8 || Shift == 16);
1736 }
1737
1738 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1739 // to LDUR/STUR when the offset is not legal for the former but is for
1740 // the latter. As such, in addition to checking for being a legal unscaled
1741 // address, also check that it is not a legal scaled address. This avoids
1742 // ambiguity in the matcher.
// Width is the access width in bits; Width/8 is the scale in bytes.
1743 template<int Width>
1744 bool isSImm9OffsetFB() const {
1745 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1746 }
1747
1748 bool isAdrpLabel() const {
1749 // Validation was handled during parsing, so we just verify that
1750 // something didn't go haywire.
1751 if (!isImm())
1752 return false;
1753
1754 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1755 int64_t Val = CE->getValue();
1756 int64_t Min = - (4096 * (1LL << (21 - 1)));
1757 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1758 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1759 }
1760
1761 return true;
1762 }
1763
1764 bool isAdrLabel() const {
1765 // Validation was handled during parsing, so we just verify that
1766 // something didn't go haywire.
1767 if (!isImm())
1768 return false;
1769
1770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1771 int64_t Val = CE->getValue();
1772 int64_t Min = - (1LL << (21 - 1));
1773 int64_t Max = ((1LL << (21 - 1)) - 1);
1774 return Val >= Min && Val <= Max;
1775 }
1776
1777 return true;
1778 }
1779
// SME matrix operand: must be the right matrix kind (tile/row/col/array),
// in the given register class, and with the expected element size.
// NOTE(review): the DiagnosticPredicate return statements are elided in
// this rendering.
1780 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1781 DiagnosticPredicate isMatrixRegOperand() const {
1782 if (!isMatrix())
1784 if (getMatrixKind() != Kind ||
1785 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1786 EltSize != getMatrixElementWidth())
1789 }
1790
1791 bool isPAuthPCRelLabel16Operand() const {
1792 // PAuth PCRel16 operands are similar to regular branch targets, but only
1793 // negative values are allowed for concrete immediates as signing instr
1794 // should be in a lower address.
1795 if (!isImm())
1796 return false;
1797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1798 if (!MCE)
1799 return true;
1800 int64_t Val = MCE->getValue();
1801 if (Val & 0b11)
1802 return false;
1803 return (Val <= 0) && (Val > -(1 << 18));
1804 }
1805
// Operand-emission helpers: each appends the MCOperand(s) for this parsed
// operand to Inst. Several register forms are renumbered into a sibling
// register file by index arithmetic (e.g. Qn -> Dn, dN -> zN, pnN -> pN).
// NOTE(review): several addOperand(...) lines are elided in this rendering
// (original lines 1809, 1813, 1818, 1833-1835, 1845-1847, 1871, 1891,
// 1896, 1901).
1806 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1807 // Add as immediates when possible. Null MCExpr = 0.
1808 if (!Expr)
1810 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1811 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1812 else
1814 }
1815
1816 void addRegOperands(MCInst &Inst, unsigned N) const {
1817 assert(N == 1 && "Invalid number of operands!");
1819 }
1820
1821 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1822 assert(N == 1 && "Invalid number of operands!");
1823 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1824 }
1825
// Emit the W register corresponding to a parsed X register (and vice versa
// below), using the register-class tables to translate the index.
1826 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 assert(
1829 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1830
1831 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1832 MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
1834
1836 }
1837
1838 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!");
1840 assert(
1841 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1842
1843 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1844 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
1846
1848 }
1849
// Translate a scalar FP register (b/h/s/d/q) to the Z register of the same
// index.
1850 template <int Width>
1851 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1852 unsigned Base;
1853 switch (Width) {
1854 case 8: Base = AArch64::B0; break;
1855 case 16: Base = AArch64::H0; break;
1856 case 32: Base = AArch64::S0; break;
1857 case 64: Base = AArch64::D0; break;
1858 case 128: Base = AArch64::Q0; break;
1859 default:
1860 llvm_unreachable("Unsupported width");
1861 }
1862 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1863 }
1864
1865 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!");
1867 MCRegister Reg = getReg();
1868 // Normalise to PPR
1869 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1870 Reg = Reg - AArch64::PN0 + AArch64::P0;
1872 }
1873
1874 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 Inst.addOperand(
1877 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1878 }
1879
// Emit the D view of a parsed Q register.
1880 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1881 assert(N == 1 && "Invalid number of operands!");
1882 assert(
1883 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1884 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1885 }
1886
1887 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1888 assert(N == 1 && "Invalid number of operands!");
1889 assert(
1890 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1892 }
1893
1894 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1895 assert(N == 1 && "Invalid number of operands!");
1897 }
1898
1899 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1900 assert(N == 1 && "Invalid number of operands!");
1902 }
1903
// Index into the FirstRegs table below: one row per register file a vector
// list can name.
1904 enum VecListIndexType {
1905 VecListIdx_DReg = 0,
1906 VecListIdx_QReg = 1,
1907 VecListIdx_ZReg = 2,
1908 VecListIdx_PReg = 3,
1909 };
1910
// Emit the tuple pseudo-register for a vector list. Column 0 of FirstRegs
// is the base single register the parsed start register is measured from;
// columns 1..4 are the first 1-, 2-, 3- and 4-register tuples, so the
// operand register is tuple_base + (start - base).
1911 template <VecListIndexType RegTy, unsigned NumRegs,
1912 bool IsConsecutive = false>
1913 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1914 assert(N == 1 && "Invalid number of operands!");
1915 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1916 "Expected consecutive registers");
1917 static const unsigned FirstRegs[][5] = {
1918 /* DReg */ { AArch64::Q0,
1919 AArch64::D0, AArch64::D0_D1,
1920 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1921 /* QReg */ { AArch64::Q0,
1922 AArch64::Q0, AArch64::Q0_Q1,
1923 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1924 /* ZReg */ { AArch64::Z0,
1925 AArch64::Z0, AArch64::Z0_Z1,
1926 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1927 /* PReg */ { AArch64::P0,
1928 AArch64::P0, AArch64::P0_P1 }
1929 };
1930
1931 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1932 " NumRegs must be <= 4 for ZRegs");
1933
1934 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1935 " NumRegs must be <= 2 for PRegs");
1936
1937 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1938 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1939 FirstRegs[(unsigned)RegTy][0]));
1940 }
1941
// Emit the strided-tuple pseudo-register for an SME2 strided vector list
// (stride 8 for pairs, stride 4 for quads), split by which half of the Z
// file the start register lives in.
// NOTE(review): the Inst.addOperand(MCOperand::createReg( lines are elided
// in this rendering (original lines 1952, 1957, 1965, 1970).
1942 template <unsigned NumRegs>
1943 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1944 assert(N == 1 && "Invalid number of operands!");
1945 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1946
1947 switch (NumRegs) {
1948 case 2:
1949 if (getVectorListStart() < AArch64::Z16) {
1950 assert((getVectorListStart() < AArch64::Z8) &&
1951 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1953 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1954 } else {
1955 assert((getVectorListStart() < AArch64::Z24) &&
1956 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1958 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1959 }
1960 break;
1961 case 4:
1962 if (getVectorListStart() < AArch64::Z16) {
1963 assert((getVectorListStart() < AArch64::Z4) &&
1964 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1966 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1967 } else {
1968 assert((getVectorListStart() < AArch64::Z20) &&
1969 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1971 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1972 }
1973 break;
1974 default:
1975 llvm_unreachable("Unsupported number of registers for strided vec list");
1976 }
1977 }
1978
// Immediate-style operand emitters: tile-list mask, vector lane index,
// exact-FP selector bit, and the generic immediate.
1979 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1980 assert(N == 1 && "Invalid number of operands!");
1981 unsigned RegMask = getMatrixTileListRegMask();
1982 assert(RegMask <= 0xFF && "Invalid mask!");
1983 Inst.addOperand(MCOperand::createImm(RegMask));
1984 }
1985
1986 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1989 }
1990
// The encoded value is a single bit selecting between the two table
// entries: 0 if the immediate matches ImmIs0, 1 if it matches ImmIs1.
1991 template <unsigned ImmIs0, unsigned ImmIs1>
1992 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!");
1994 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1995 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1996 }
1997
1998 void addImmOperands(MCInst &Inst, unsigned N) const {
1999 assert(N == 1 && "Invalid number of operands!");
2000 // If this is a pageoff symrefexpr with an addend, adjust the addend
2001 // to be only the page-offset portion. Otherwise, just add the expr
2002 // as-is.
2003 addExpr(Inst, getImm());
2004 }
2005
// Emit an (immediate, shift) operand pair. Constants are normalised via
// getShiftedVal<Shift>; symbolic expressions keep their parsed shift.
// NOTE(review): original line 2017 (the shift operand of the final branch)
// is elided in this rendering.
2006 template <int Shift>
2007 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2008 assert(N == 2 && "Invalid number of operands!");
2009 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2010 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
2011 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2012 } else if (isShiftedImm()) {
2013 addExpr(Inst, getShiftedImmVal());
2014 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
2015 } else {
2016 addExpr(Inst, getImm());
2018 }
2019 }
2020
// Negated variant used when a SUB alias is matched as an ADD (or vice
// versa); only concrete shifted immediates are possible here.
2021 template <int Shift>
2022 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 2 && "Invalid number of operands!");
2024 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2025 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2026 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2027 } else
2028 llvm_unreachable("Not a shifted negative immediate");
2029 }
2030
// Condition-code, ADRP/ADR label and small-unsigned-immediate emitters.
// ADRP constants are converted from a byte offset to a page count (>> 12).
// NOTE(review): original lines 2033, 2055 and 2064 (addOperand/addExpr
// calls) are elided in this rendering.
2031 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2032 assert(N == 1 && "Invalid number of operands!");
2034 }
2035
2036 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2039 if (!MCE)
2040 addExpr(Inst, getImm());
2041 else
2042 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2043 }
2044
2045 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2046 addImmOperands(Inst, N);
2047 }
2048
// Scaled unsigned 12-bit offset: constants are divided by the access size.
2049 template<int Scale>
2050 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2051 assert(N == 1 && "Invalid number of operands!");
2052 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2053
2054 if (!MCE) {
2056 return;
2057 }
2058 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2059 }
2060
2061 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2062 assert(N == 1 && "Invalid number of operands!");
2063 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2065 }
2066
// Scaled and logical-immediate emitters. Logical immediates are converted
// to the N:immr:imms bitmask encoding; the "Not" variant encodes the
// bitwise complement (for BIC-style aliases).
2067 template <int Scale>
2068 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!");
2070 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2071 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2072 }
2073
2074 template <int Scale>
2075 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2076 assert(N == 1 && "Invalid number of operands!");
2077 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2078 }
2079
2080 template <typename T>
2081 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2084 std::make_unsigned_t<T> Val = MCE->getValue();
2085 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2086 Inst.addOperand(MCOperand::createImm(encoding));
2087 }
2088
2089 template <typename T>
2090 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2091 assert(N == 1 && "Invalid number of operands!");
2092 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2093 std::make_unsigned_t<T> Val = ~MCE->getValue();
2094 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2095 Inst.addOperand(MCOperand::createImm(encoding));
2096 }
2097
// AdvSIMD type-10 immediate and PC-relative branch-target emitters. The
// branch targets drop the two always-zero low bits (>> 2); symbolic targets
// are emitted as expressions for the fixup code.
2098 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2099 assert(N == 1 && "Invalid number of operands!");
2100 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2101 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2102 Inst.addOperand(MCOperand::createImm(encoding));
2103 }
2104
2105 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
// NOTE(review): this assert is trivially true after the early return above.
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2120 // PC-relative operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2130 }
2131
2132 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2133 // Branch operands don't encode the low bits, so shift them off
2134 // here. If it's a label, however, just put it on directly as there's
2135 // not enough information now to do anything.
2136 assert(N == 1 && "Invalid number of operands!");
2137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2138 if (!MCE) {
2139 addExpr(Inst, getImm());
2140 return;
2141 }
// NOTE(review): this assert is trivially true after the early return above.
2142 assert(MCE && "Invalid constant immediate operand!");
2143 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2144 }
2145
2146 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2147 // Branch operands don't encode the low bits, so shift them off
2148 // here. If it's a label, however, just put it on directly as there's
2149 // not enough information now to do anything.
2150 assert(N == 1 && "Invalid number of operands!");
2151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2152 if (!MCE) {
2153 addExpr(Inst, getImm());
2154 return;
2155 }
2156 assert(MCE && "Invalid constant immediate operand!");
2157 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2158 }
2159
2160 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2161 // Branch operands don't encode the low bits, so shift them off
2162 // here. If it's a label, however, just put it on directly as there's
2163 // not enough information now to do anything.
2164 assert(N == 1 && "Invalid number of operands!");
2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2166 if (!MCE) {
2167 addExpr(Inst, getImm());
2168 return;
2169 }
2170 assert(MCE && "Invalid constant immediate operand!");
2171 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2172 }
2173
2174 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2175 assert(N == 1 && "Invalid number of operands!");
2177 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2178 }
2179
2180 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2181 assert(N == 1 && "Invalid number of operands!");
2182 Inst.addOperand(MCOperand::createImm(getBarrier()));
2183 }
2184
2185 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 Inst.addOperand(MCOperand::createImm(getBarrier()));
2188 }
2189
2190 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2191 assert(N == 1 && "Invalid number of operands!");
2192
2193 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2194 }
2195
2196 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 1 && "Invalid number of operands!");
2198
2199 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2200 }
2201
2202 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2203 assert(N == 1 && "Invalid number of operands!");
2204
2205 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2206 }
2207
2208 void addSVCROperands(MCInst &Inst, unsigned N) const {
2209 assert(N == 1 && "Invalid number of operands!");
2210
2211 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2212 }
2213
2214 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216
2217 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2218 }
2219
2220 void addSysCROperands(MCInst &Inst, unsigned N) const {
2221 assert(N == 1 && "Invalid number of operands!");
2222 Inst.addOperand(MCOperand::createImm(getSysCR()));
2223 }
2224
2225 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2226 assert(N == 1 && "Invalid number of operands!");
2227 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2228 }
2229
2230 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2231 assert(N == 1 && "Invalid number of operands!");
2232 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2233 }
2234
2235 void addPHintOperands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 Inst.addOperand(MCOperand::createImm(getPHint()));
2238 }
2239
2240 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2243 }
2244
2245 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2246 assert(N == 1 && "Invalid number of operands!");
2247 Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
2248 }
2249
2250 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2251 assert(N == 1 && "Invalid number of operands!");
2252 Inst.addOperand(MCOperand::createImm(getTIndexHint()));
2253 }
2254
2255 void addShifterOperands(MCInst &Inst, unsigned N) const {
2256 assert(N == 1 && "Invalid number of operands!");
2257 unsigned Imm =
2258 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2260 }
2261
2262 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2263 assert(N == 1 && "Invalid number of operands!");
2264 unsigned Imm = getShiftExtendAmount();
2266 }
2267
2268 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2269 assert(N == 1 && "Invalid number of operands!");
2270
2271 if (!isScalarReg())
2272 return;
2273
2274 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2275 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2277 if (Reg != AArch64::XZR)
2278 llvm_unreachable("wrong register");
2279
2280 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2281 }
2282
2283 void addExtendOperands(MCInst &Inst, unsigned N) const {
2284 assert(N == 1 && "Invalid number of operands!");
2285 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2286 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2287 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2289 }
2290
2291 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2294 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2295 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2297 }
2298
2299 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2300 assert(N == 2 && "Invalid number of operands!");
2301 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2302 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2303 Inst.addOperand(MCOperand::createImm(IsSigned));
2304 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2305 }
2306
2307 // For 8-bit load/store instructions with a register offset, both the
2308 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2309 // they're disambiguated by whether the shift was explicit or implicit rather
2310 // than its size.
2311 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2312 assert(N == 2 && "Invalid number of operands!");
2313 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2314 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2315 Inst.addOperand(MCOperand::createImm(IsSigned));
2316 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2317 }
2318
2319 template<int Shift>
2320 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322
2323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2324 if (CE) {
2325 uint64_t Value = CE->getValue();
2326 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2327 } else {
2328 addExpr(Inst, getImm());
2329 }
2330 }
2331
2332 template<int Shift>
2333 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2334 assert(N == 1 && "Invalid number of operands!");
2335
2336 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2337 uint64_t Value = CE->getValue();
2338 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2339 }
2340
2341 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2342 assert(N == 1 && "Invalid number of operands!");
2343 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2344 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2345 }
2346
2347 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2348 assert(N == 1 && "Invalid number of operands!");
2349 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2350 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2351 }
2352
2353 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2354
2355 static std::unique_ptr<AArch64Operand>
2356 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2357 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2358 Op->Tok.Data = Str.data();
2359 Op->Tok.Length = Str.size();
2360 Op->Tok.IsSuffix = IsSuffix;
2361 Op->StartLoc = S;
2362 Op->EndLoc = S;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2368 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2370 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2371 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2372 Op->Reg.Reg = Reg;
2373 Op->Reg.Kind = Kind;
2374 Op->Reg.ElementWidth = 0;
2375 Op->Reg.EqualityTy = EqTy;
2376 Op->Reg.ShiftExtend.Type = ExtTy;
2377 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2378 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
  /// Create a vector-register operand (NEON or SVE data/predicate/counter),
  /// recording the element width on top of the plain register operand.
  static std::unique_ptr<AArch64Operand> CreateVectorReg(
      MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
      MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
      unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2400 unsigned NumElements, unsigned ElementWidth,
2401 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2403 Op->VectorList.Reg = Reg;
2404 Op->VectorList.Count = Count;
2405 Op->VectorList.Stride = Stride;
2406 Op->VectorList.NumElements = NumElements;
2407 Op->VectorList.ElementWidth = ElementWidth;
2408 Op->VectorList.RegisterKind = RegisterKind;
2409 Op->StartLoc = S;
2410 Op->EndLoc = E;
2411 return Op;
2412 }
2413
2414 static std::unique_ptr<AArch64Operand>
2415 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2417 Op->VectorIndex.Val = Idx;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand>
2424 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2426 Op->MatrixTileList.RegMask = RegMask;
2427 Op->StartLoc = S;
2428 Op->EndLoc = E;
2429 return Op;
2430 }
2431
  // Expand an SME tile name of the given element width into the set of
  // 64-bit (ZAD) tiles it aliases; used for tile-list overlap checking.
  // 64-bit tiles alias only themselves, so they are inserted directly.
  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
                                  const unsigned ElementWidth) {
    // Key: (element width, tile register) -> aliased ZAD tiles. Function-local
    // static so the table is built once.
    static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
        RegMap = {
            {{0, AArch64::ZAB0},
             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
            {{8, AArch64::ZAB0},
             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
            {{16, AArch64::ZAH0},
             {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
            {{16, AArch64::ZAH1},
             {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
            {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
            {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
            {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
            {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
        };

    if (ElementWidth == 64)
      OutRegs.insert(Reg);
    else {
      std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
      assert(!Regs.empty() && "Invalid tile or element width!");
      OutRegs.insert_range(Regs);
    }
  }
2460
2461 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2462 SMLoc E, MCContext &Ctx) {
2463 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2464 Op->Imm.Val = Val;
2465 Op->StartLoc = S;
2466 Op->EndLoc = E;
2467 return Op;
2468 }
2469
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2471 unsigned ShiftAmount,
2472 SMLoc S, SMLoc E,
2473 MCContext &Ctx) {
2474 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2477 Op->StartLoc = S;
2478 Op->EndLoc = E;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2483 unsigned Last, SMLoc S,
2484 SMLoc E,
2485 MCContext &Ctx) {
2486 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2487 Op->ImmRange.First = First;
2488 Op->ImmRange.Last = Last;
2489 Op->EndLoc = E;
2490 return Op;
2491 }
2492
2493 static std::unique_ptr<AArch64Operand>
2494 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2495 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2496 Op->CondCode.Code = Code;
2497 Op->StartLoc = S;
2498 Op->EndLoc = E;
2499 return Op;
2500 }
2501
2502 static std::unique_ptr<AArch64Operand>
2503 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2504 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2505 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2506 Op->FPImm.IsExact = IsExact;
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2513 StringRef Str,
2514 SMLoc S,
2515 MCContext &Ctx,
2516 bool HasnXSModifier) {
2517 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2518 Op->Barrier.Val = Val;
2519 Op->Barrier.Data = Str.data();
2520 Op->Barrier.Length = Str.size();
2521 Op->Barrier.HasnXSModifier = HasnXSModifier;
2522 Op->StartLoc = S;
2523 Op->EndLoc = S;
2524 return Op;
2525 }
2526
2527 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2528 uint32_t MRSReg,
2529 uint32_t MSRReg,
2530 uint32_t PStateField,
2531 MCContext &Ctx) {
2532 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2533 Op->SysReg.Data = Str.data();
2534 Op->SysReg.Length = Str.size();
2535 Op->SysReg.MRSReg = MRSReg;
2536 Op->SysReg.MSRReg = MSRReg;
2537 Op->SysReg.PStateField = PStateField;
2538 Op->StartLoc = S;
2539 Op->EndLoc = S;
2540 return Op;
2541 }
2542
2543 static std::unique_ptr<AArch64Operand>
2544 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2545 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2546 Op->PHint.Val = Val;
2547 Op->PHint.Data = Str.data();
2548 Op->PHint.Length = Str.size();
2549 Op->StartLoc = S;
2550 Op->EndLoc = S;
2551 return Op;
2552 }
2553
2554 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2555 SMLoc E, MCContext &Ctx) {
2556 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2557 Op->SysCRImm.Val = Val;
2558 Op->StartLoc = S;
2559 Op->EndLoc = E;
2560 return Op;
2561 }
2562
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2564 StringRef Str,
2565 SMLoc S,
2566 MCContext &Ctx) {
2567 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2571 Op->StartLoc = S;
2572 Op->EndLoc = S;
2573 return Op;
2574 }
2575
2576 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2577 StringRef Str,
2578 SMLoc S,
2579 MCContext &Ctx) {
2580 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2581 Op->PSBHint.Val = Val;
2582 Op->PSBHint.Data = Str.data();
2583 Op->PSBHint.Length = Str.size();
2584 Op->StartLoc = S;
2585 Op->EndLoc = S;
2586 return Op;
2587 }
2588
  /// Create a BTI target operand, keeping the written spelling for
  /// diagnostics.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    // BTI targets live in the HINT immediate space starting at #32, so the
    // stored value is biased with bit 5 set.
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2601
2602 static std::unique_ptr<AArch64Operand>
2603 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2604 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2605 Op->CMHPriorityHint.Val = Val;
2606 Op->CMHPriorityHint.Data = Str.data();
2607 Op->CMHPriorityHint.Length = Str.size();
2608 Op->StartLoc = S;
2609 Op->EndLoc = S;
2610 return Op;
2611 }
2612
2613 static std::unique_ptr<AArch64Operand>
2614 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2615 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2616 Op->TIndexHint.Val = Val;
2617 Op->TIndexHint.Data = Str.data();
2618 Op->TIndexHint.Length = Str.size();
2619 Op->StartLoc = S;
2620 Op->EndLoc = S;
2621 return Op;
2622 }
2623
2624 static std::unique_ptr<AArch64Operand>
2625 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2626 SMLoc S, SMLoc E, MCContext &Ctx) {
2627 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2628 Op->MatrixReg.Reg = Reg;
2629 Op->MatrixReg.ElementWidth = ElementWidth;
2630 Op->MatrixReg.Kind = Kind;
2631 Op->StartLoc = S;
2632 Op->EndLoc = E;
2633 return Op;
2634 }
2635
2636 static std::unique_ptr<AArch64Operand>
2637 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2638 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2639 Op->SVCR.PStateField = PStateField;
2640 Op->SVCR.Data = Str.data();
2641 Op->SVCR.Length = Str.size();
2642 Op->StartLoc = S;
2643 Op->EndLoc = S;
2644 return Op;
2645 }
2646
2647 static std::unique_ptr<AArch64Operand>
2648 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2649 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2650 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2651 Op->ShiftExtend.Type = ShOp;
2652 Op->ShiftExtend.Amount = Val;
2653 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2654 Op->StartLoc = S;
2655 Op->EndLoc = E;
2656 return Op;
2657 }
2658};
2659
2660} // end anonymous namespace.
2661
// Debug dump of an operand; one arm per operand kind.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    // Print each register number in the list, honoring the stride.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Emit the mask as a fixed-width binary string, MSB first.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    // A register with an attached shift/extend falls through to print it.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2770
2771/// @name Auto-generated Match Functions
2772/// {
2773
2775
2776/// }
2777
// Map a NEON vector register spelling ("v0".."v31", case-insensitive) to its
// Q-register; returns 0 (no register) on failure.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2814
2815/// Returns an optional pair of (#elements, element-width) if Suffix
2816/// is a valid vector kind. Where the number of elements in a vector
2817/// or the vector width is implicit or explicitly unknown (but still a
2818/// valid suffix kind), 0 is used.
2819static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2820 RegKind VectorKind) {
2821 std::pair<int, int> Res = {-1, -1};
2822
2823 switch (VectorKind) {
2824 case RegKind::NeonVector:
2826 .Case("", {0, 0})
2827 .Case(".1d", {1, 64})
2828 .Case(".1q", {1, 128})
2829 // '.2h' needed for fp16 scalar pairwise reductions
2830 .Case(".2h", {2, 16})
2831 .Case(".2b", {2, 8})
2832 .Case(".2s", {2, 32})
2833 .Case(".2d", {2, 64})
2834 // '.4b' is another special case for the ARMv8.2a dot product
2835 // operand
2836 .Case(".4b", {4, 8})
2837 .Case(".4h", {4, 16})
2838 .Case(".4s", {4, 32})
2839 .Case(".8b", {8, 8})
2840 .Case(".8h", {8, 16})
2841 .Case(".16b", {16, 8})
2842 // Accept the width neutral ones, too, for verbose syntax. If
2843 // those aren't used in the right places, the token operand won't
2844 // match so all will work out.
2845 .Case(".b", {0, 8})
2846 .Case(".h", {0, 16})
2847 .Case(".s", {0, 32})
2848 .Case(".d", {0, 64})
2849 .Default({-1, -1});
2850 break;
2851 case RegKind::SVEPredicateAsCounter:
2852 case RegKind::SVEPredicateVector:
2853 case RegKind::SVEDataVector:
2854 case RegKind::Matrix:
2856 .Case("", {0, 0})
2857 .Case(".b", {0, 8})
2858 .Case(".h", {0, 16})
2859 .Case(".s", {0, 32})
2860 .Case(".d", {0, 64})
2861 .Case(".q", {0, 128})
2862 .Default({-1, -1});
2863 break;
2864 default:
2865 llvm_unreachable("Unsupported RegKind");
2866 }
2867
2868 if (Res == std::make_pair(-1, -1))
2869 return std::nullopt;
2870
2871 return std::optional<std::pair<int, int>>(Res);
2872}
2873
2874static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2875 return parseVectorKind(Suffix, VectorKind).has_value();
2876}
2877
2879 return StringSwitch<unsigned>(Name.lower())
2880 .Case("z0", AArch64::Z0)
2881 .Case("z1", AArch64::Z1)
2882 .Case("z2", AArch64::Z2)
2883 .Case("z3", AArch64::Z3)
2884 .Case("z4", AArch64::Z4)
2885 .Case("z5", AArch64::Z5)
2886 .Case("z6", AArch64::Z6)
2887 .Case("z7", AArch64::Z7)
2888 .Case("z8", AArch64::Z8)
2889 .Case("z9", AArch64::Z9)
2890 .Case("z10", AArch64::Z10)
2891 .Case("z11", AArch64::Z11)
2892 .Case("z12", AArch64::Z12)
2893 .Case("z13", AArch64::Z13)
2894 .Case("z14", AArch64::Z14)
2895 .Case("z15", AArch64::Z15)
2896 .Case("z16", AArch64::Z16)
2897 .Case("z17", AArch64::Z17)
2898 .Case("z18", AArch64::Z18)
2899 .Case("z19", AArch64::Z19)
2900 .Case("z20", AArch64::Z20)
2901 .Case("z21", AArch64::Z21)
2902 .Case("z22", AArch64::Z22)
2903 .Case("z23", AArch64::Z23)
2904 .Case("z24", AArch64::Z24)
2905 .Case("z25", AArch64::Z25)
2906 .Case("z26", AArch64::Z26)
2907 .Case("z27", AArch64::Z27)
2908 .Case("z28", AArch64::Z28)
2909 .Case("z29", AArch64::Z29)
2910 .Case("z30", AArch64::Z30)
2911 .Case("z31", AArch64::Z31)
2912 .Default(0);
2913}
2914
2916 return StringSwitch<unsigned>(Name.lower())
2917 .Case("p0", AArch64::P0)
2918 .Case("p1", AArch64::P1)
2919 .Case("p2", AArch64::P2)
2920 .Case("p3", AArch64::P3)
2921 .Case("p4", AArch64::P4)
2922 .Case("p5", AArch64::P5)
2923 .Case("p6", AArch64::P6)
2924 .Case("p7", AArch64::P7)
2925 .Case("p8", AArch64::P8)
2926 .Case("p9", AArch64::P9)
2927 .Case("p10", AArch64::P10)
2928 .Case("p11", AArch64::P11)
2929 .Case("p12", AArch64::P12)
2930 .Case("p13", AArch64::P13)
2931 .Case("p14", AArch64::P14)
2932 .Case("p15", AArch64::P15)
2933 .Default(0);
2934}
2935
2937 return StringSwitch<unsigned>(Name.lower())
2938 .Case("pn0", AArch64::PN0)
2939 .Case("pn1", AArch64::PN1)
2940 .Case("pn2", AArch64::PN2)
2941 .Case("pn3", AArch64::PN3)
2942 .Case("pn4", AArch64::PN4)
2943 .Case("pn5", AArch64::PN5)
2944 .Case("pn6", AArch64::PN6)
2945 .Case("pn7", AArch64::PN7)
2946 .Case("pn8", AArch64::PN8)
2947 .Case("pn9", AArch64::PN9)
2948 .Case("pn10", AArch64::PN10)
2949 .Case("pn11", AArch64::PN11)
2950 .Case("pn12", AArch64::PN12)
2951 .Case("pn13", AArch64::PN13)
2952 .Case("pn14", AArch64::PN14)
2953 .Case("pn15", AArch64::PN15)
2954 .Default(0);
2955}
2956
2958 return StringSwitch<unsigned>(Name.lower())
2959 .Case("za0.d", AArch64::ZAD0)
2960 .Case("za1.d", AArch64::ZAD1)
2961 .Case("za2.d", AArch64::ZAD2)
2962 .Case("za3.d", AArch64::ZAD3)
2963 .Case("za4.d", AArch64::ZAD4)
2964 .Case("za5.d", AArch64::ZAD5)
2965 .Case("za6.d", AArch64::ZAD6)
2966 .Case("za7.d", AArch64::ZAD7)
2967 .Case("za0.s", AArch64::ZAS0)
2968 .Case("za1.s", AArch64::ZAS1)
2969 .Case("za2.s", AArch64::ZAS2)
2970 .Case("za3.s", AArch64::ZAS3)
2971 .Case("za0.h", AArch64::ZAH0)
2972 .Case("za1.h", AArch64::ZAH1)
2973 .Case("za0.b", AArch64::ZAB0)
2974 .Default(0);
2975}
2976
// Map an SME matrix register spelling to its register: the full ZA array,
// plain tiles ("zaN.t"), and the horizontal/vertical slice spellings
// ("zaNh.t"/"zaNv.t"), which resolve to the same tile register. Returns 0
// on failure.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
3075
3076bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3077 SMLoc &EndLoc) {
3078 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3079}
3080
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3082 SMLoc &EndLoc) {
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(Reg);
3085 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3086 return Res;
3087}
3088
// Matches a register name or register alias previously defined by '.req'.
// The register is returned only when it is of the requested kind; a register
// of any other kind yields the invalid (default) MCRegister so the caller can
// try other interpretations.
MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                    RegKind Kind) {
  MCRegister Reg = MCRegister();
  if ((Reg = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? Reg : MCRegister();

  if ((Reg = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();

  // Fix: the guard below was missing, so the function returned unconditionally
  // here and every later match (NEON, matrix, scalar, .req aliases) was dead.
  if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
    return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();

  if ((Reg = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? Reg : MCRegister();

  if ((Reg = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? Reg : MCRegister();

  // ZT0 is the sole lookup-table register.
  if (Name.equals_insensitive("zt0"))
    return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;

  // The parsed register must be of RegKind Scalar
  if ((Reg = MatchRegisterName(Name)))
    return (Kind == RegKind::Scalar) ? Reg : MCRegister();

  if (!Reg) {
    // Handle a few common aliases of registers.
    if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
                             .Case("fp", AArch64::FP)
                             .Case("lr", AArch64::LR)
                             .Case("x31", AArch64::XZR)
                             .Case("w31", AArch64::WZR)
                             .Default(0))
      return Kind == RegKind::Scalar ? Reg : MCRegister();

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return MCRegister();

    // set Reg if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      Reg = Entry->getValue().second;
  }
  return Reg;
}
3138
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3140 switch (K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3144 return 32;
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3148 return 16;
3149 case RegKind::LookupTable:
3150 return 1;
3151 }
3152 llvm_unreachable("Unsupported RegKind");
3153}
3154
/// tryParseScalarRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is returned through \p RegNum.
ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
  const AsmToken &Ident = getTok();
  if (!Ident.is(AsmToken::Identifier))
    return ParseStatus::NoMatch;

  // Register names are case-insensitive; match on the lower-cased spelling.
  std::string LowerName = Ident.getString().lower();
  MCRegister Match = matchRegisterNameAlias(LowerName, RegKind::Scalar);
  if (!Match)
    return ParseStatus::NoMatch;

  RegNum = Match;
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3172
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "Expected cN operand where 0 <= N <= 15");

  // The operand is spelled "cN"/"CN" with N a decimal number in [0, 15].
  StringRef Name = getTok().getIdentifier();
  if (Name[0] != 'c' && Name[0] != 'C')
    return Error(S, "Expected cN operand where 0 <= N <= 15");

  uint32_t CRNum;
  if (Name.drop_front().getAsInteger(10, CRNum) || CRNum > 15)
    return Error(S, "Expected cN operand where 0 <= N <= 15");

  Lex(); // Eat identifier token.
  Operands.push_back(
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return ParseStatus::Success;
}
3194
3195// Either an identifier for named values or a 6-bit immediate.
3196ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3197 SMLoc S = getLoc();
3198 const AsmToken &Tok = getTok();
3199
3200 unsigned MaxVal = 63;
3201
3202 // Immediate case, with optional leading hash:
3203 if (parseOptionalToken(AsmToken::Hash) ||
3204 Tok.is(AsmToken::Integer)) {
3205 const MCExpr *ImmVal;
3206 if (getParser().parseExpression(ImmVal))
3207 return ParseStatus::Failure;
3208
3209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3210 if (!MCE)
3211 return TokError("immediate value expected for prefetch operand");
3212 unsigned prfop = MCE->getValue();
3213 if (prfop > MaxVal)
3214 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3215 "] expected");
3216
3217 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3218 Operands.push_back(AArch64Operand::CreatePrefetch(
3219 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3220 return ParseStatus::Success;
3221 }
3222
3223 if (Tok.isNot(AsmToken::Identifier))
3224 return TokError("prefetch hint expected");
3225
3226 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3227 if (!RPRFM)
3228 return TokError("prefetch hint expected");
3229
3230 Operands.push_back(AArch64Operand::CreatePrefetch(
3231 RPRFM->Encoding, Tok.getString(), S, getContext()));
3232 Lex(); // Eat identifier token.
3233 return ParseStatus::Success;
3234}
3235
/// tryParsePrefetch - Try to parse a prefetch operand. The template flag
/// selects between the SVE and base-ISA prefetch-hint name tables.
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name<->encoding lookups differ between SVE and base prefetch hints.
  auto LookupByName = [](StringRef N) -> std::optional<unsigned> {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Res->Encoding;
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N)) {
      return Res->Encoding;
    }
    return std::nullopt;
  };

  auto LookupByEncoding = [](unsigned E) -> std::optional<StringRef> {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Res->Name;
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E)) {
      return Res->Name;
    }
    return std::nullopt;
  };
  const unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;

    const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
                      "] expected");

    // Attach the symbolic name when this encoding has one.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
                                                      S, getContext()));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError("prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3295
3296/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3297ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3298 SMLoc S = getLoc();
3299 const AsmToken &Tok = getTok();
3300 if (Tok.isNot(AsmToken::Identifier))
3301 return TokError("invalid operand for instruction");
3302
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3304 if (!PSB)
3305 return TokError("invalid operand for instruction");
3306
3307 Operands.push_back(AArch64Operand::CreatePSBHint(
3308 PSB->Encoding, Tok.getString(), S, getContext()));
3309 Lex(); // Eat identifier token.
3310 return ParseStatus::Success;
3311}
3312
3313ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3314 SMLoc StartLoc = getLoc();
3315
3316 MCRegister RegNum;
3317
3318 // The case where xzr, xzr is not present is handled by an InstAlias.
3319
3320 auto RegTok = getTok(); // in case we need to backtrack
3321 if (!tryParseScalarRegister(RegNum).isSuccess())
3322 return ParseStatus::NoMatch;
3323
3324 if (RegNum != AArch64::XZR) {
3325 getLexer().UnLex(RegTok);
3326 return ParseStatus::NoMatch;
3327 }
3328
3329 if (parseComma())
3330 return ParseStatus::Failure;
3331
3332 if (!tryParseScalarRegister(RegNum).isSuccess())
3333 return TokError("expected register operand");
3334
3335 if (RegNum != AArch64::XZR)
3336 return TokError("xzr must be followed by xzr");
3337
3338 // We need to push something, since we claim this is an operand in .td.
3339 // See also AArch64AsmParser::parseKeywordOperand.
3340 Operands.push_back(AArch64Operand::CreateReg(
3341 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3342
3343 return ParseStatus::Success;
3344}
3345
3346/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3347ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3348 SMLoc S = getLoc();
3349 const AsmToken &Tok = getTok();
3350 if (Tok.isNot(AsmToken::Identifier))
3351 return TokError("invalid operand for instruction");
3352
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3354 if (!BTI)
3355 return TokError("invalid operand for instruction");
3356
3357 Operands.push_back(AArch64Operand::CreateBTIHint(
3358 BTI->Encoding, Tok.getString(), S, getContext()));
3359 Lex(); // Eat identifier token.
3360 return ParseStatus::Success;
3361}
3362
3363/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3365 SMLoc S = getLoc();
3366 const AsmToken &Tok = getTok();
3367 if (Tok.isNot(AsmToken::Identifier))
3368 return TokError("invalid operand for instruction");
3369
3370 auto CMHPriority =
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3372 if (!CMHPriority)
3373 return TokError("invalid operand for instruction");
3374
3375 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3376 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3377 Lex(); // Eat identifier token.
3378 return ParseStatus::Success;
3379}
3380
3381/// tryParseTIndexHint - Try to parse a TIndex operand
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384 const AsmToken &Tok = getTok();
3385 if (Tok.isNot(AsmToken::Identifier))
3386 return TokError("invalid operand for instruction");
3387
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
3389 if (!TIndex)
3390 return TokError("invalid operand for instruction");
3391
3392 Operands.push_back(AArch64Operand::CreateTIndexHint(
3393 TIndex->Encoding, Tok.getString(), S, getContext()));
3394 Lex(); // Eat identifier token.
3395 return ParseStatus::Success;
3396}
3397
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An optional '#' may precede the label or immediate.
  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      // Fix: the assignment's right-hand side was missing, leaving a dangling
      // "Expr =". Restored the :abs_page: wrap used for plain ADRP labels.
      // NOTE(review): confirm the specifier constant against upstream LLVM.
      Expr = MCSpecifierExpr::create(Expr, AArch64::S_ABS_PAGE, getContext());
    } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
                DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
               Addend != 0) {
      return Error(S, "gotpage label reference not allowed an addend");
    } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
               DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
               DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
               ELFSpec != AArch64::S_ABS_PAGE_NC &&
               ELFSpec != AArch64::S_GOT_PAGE &&
               ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
               ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
               ELFSpec != AArch64::S_GOTTPREL_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(S, "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return ParseStatus::Success;
}
3447
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(AsmToken::LBrac))
    return ParseStatus::NoMatch;

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      // Fix: the branch body was missing entirely, leaving an empty "if".
      // Restored the plain-label wrap for ADR.
      // NOTE(review): confirm the specifier constant against upstream LLVM.
      Expr = MCSpecifierExpr::create(Expr, AArch64::S_ABS, getContext());
    } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
      // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
      // adr. It's not actually GOT entry page address but the GOT address
      // itself - we just share the same variant kind with :got_auth: operator
      // applied for adrp.
      // TODO: can we somehow get current TargetMachine object to call
      // getCodeModel() on it to ensure we are using tiny code model?
      return Error(S, "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return ParseStatus::Success;
}
3487
3488/// tryParseFPImm - A floating point immediate expression operand.
3489template <bool AddFPZeroAsLiteral>
3490ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3491 SMLoc S = getLoc();
3492
3493 bool Hash = parseOptionalToken(AsmToken::Hash);
3494
3495 // Handle negation, as that still comes through as a separate token.
3496 bool isNegative = parseOptionalToken(AsmToken::Minus);
3497
3498 const AsmToken &Tok = getTok();
3499 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3500 if (!Hash)
3501 return ParseStatus::NoMatch;
3502 return TokError("invalid floating point immediate");
3503 }
3504
3505 // Parse hexadecimal representation.
3506 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3507 if (Tok.getIntVal() > 255 || isNegative)
3508 return TokError("encoded floating point value out of range");
3509
3511 Operands.push_back(
3512 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3513 } else {
3514 // Parse FP representation.
3515 APFloat RealVal(APFloat::IEEEdouble());
3516 auto StatusOrErr =
3517 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3518 if (errorToBool(StatusOrErr.takeError()))
3519 return TokError("invalid floating point representation");
3520
3521 if (isNegative)
3522 RealVal.changeSign();
3523
3524 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3525 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3526 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3527 } else
3528 Operands.push_back(AArch64Operand::CreateFPImm(
3529 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3530 }
3531
3532 Lex(); // Eat the token.
3533
3534 return ParseStatus::Success;
3535}
3536
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'. Also dispatches immediate
/// ranges ('#M:N') and vector-group suffixes (', vgx2'/', vgx4') when they
/// follow the immediate.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "<int>:<...>" is an immediate range, handled by a dedicated parser.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(AsmToken::Comma)) {
    // No suffix follows: plain immediate operand.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // A vector-group suffix (e.g. ", vgx2") becomes a separate token operand.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return ParseStatus::Success;
}
3603
/// parseCondCodeString - Parse a Condition Code string, optionally returning a
/// suggestion to help common typos. Returns AArch64CC::Invalid for unknown
/// strings; SVE aliases (e.g. "none", "any") map onto the base codes.
AArch64CC::CondCode
AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
  // Fix: the function's return-type line was missing, leaving an invalid
  // definition; restored as AArch64CC::CondCode.
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                               .Case("eq", AArch64CC::EQ)
                               .Case("ne", AArch64CC::NE)
                               .Case("cs", AArch64CC::HS)
                               .Case("hs", AArch64CC::HS)
                               .Case("cc", AArch64CC::LO)
                               .Case("lo", AArch64CC::LO)
                               .Case("mi", AArch64CC::MI)
                               .Case("pl", AArch64CC::PL)
                               .Case("vs", AArch64CC::VS)
                               .Case("vc", AArch64CC::VC)
                               .Case("hi", AArch64CC::HI)
                               .Case("ls", AArch64CC::LS)
                               .Case("ge", AArch64CC::GE)
                               .Case("lt", AArch64CC::LT)
                               .Case("gt", AArch64CC::GT)
                               .Case("le", AArch64CC::LE)
                               .Case("al", AArch64CC::AL)
                               .Case("nv", AArch64CC::NV)
                               // SVE condition code aliases:
                               .Case("none", AArch64CC::EQ)
                               .Case("any", AArch64CC::NE)
                               .Case("nlast", AArch64CC::HS)
                               .Case("last", AArch64CC::LO)
                               .Case("first", AArch64CC::MI)
                               .Case("nfrst", AArch64CC::PL)
                               .Case("pmore", AArch64CC::HI)
                               .Case("plast", AArch64CC::LS)
                               .Case("tcont", AArch64CC::GE)
                               .Case("tstop", AArch64CC::LT)
                               .Default(AArch64CC::Invalid);

  // "nfirst" is a common misspelling of the SVE alias "nfrst".
  if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
    Suggestion = "nfrst";

  return CC;
}
3645
/// parseCondCode - Parse a Condition Code operand. When \p invertCondCode is
/// set, the parsed code is inverted before being pushed (AL/NV cannot be
/// inverted and are rejected).
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  std::string Suggestion;
  AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
  if (CC == AArch64CC::Invalid) {
    std::string Msg = "invalid condition code";
    if (!Suggestion.empty())
      Msg += ", did you mean " + Suggestion + "?";
    return TokError(Msg);
  }
  Lex(); // Eat identifier token.

  if (invertCondCode) {
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    // Fix: the actual inversion was missing, making invertCondCode a no-op
    // beyond the AL/NV rejection above.
    CC = AArch64CC::getInvertedCondCode(CC);
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}
3674
3675ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3676 const AsmToken &Tok = getTok();
3677 SMLoc S = getLoc();
3678
3679 if (Tok.isNot(AsmToken::Identifier))
3680 return TokError("invalid operand for instruction");
3681
3682 unsigned PStateImm = -1;
3683 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3684 if (!SVCR)
3685 return ParseStatus::NoMatch;
3686 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3687 PStateImm = SVCR->Encoding;
3688
3689 Operands.push_back(
3690 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3691 Lex(); // Eat identifier token.
3692 return ParseStatus::Success;
3693}
3694
/// Parse an SME matrix register operand: either the whole ZA array
/// ("za" / "za.<b|h|s|d>") or a tile/row/column register such as "za0h.s".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Whole-array form: "za" with an optional element-width suffix.
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Matrix register names always contain a '.' separating the tile name from
  // its element-width suffix (e.g. "za0h.s").
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // The last character before the dot distinguishes a row ('h'), a column
  // ('v'), or a plain tile (anything else, i.e. a digit).
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3762
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument (e.g. "lsl #2", "sxtw"). Parse it if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  std::string LowerID = Tok.getString().lower();
  // Fix: the result variable, the .Default case, and the invalid-check were
  // missing, leaving the StringSwitch expression dangling; restored.
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError("expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier))
    return Error(E, "expected integer shift amount");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return ParseStatus::Failure;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE)
    return Error(E, "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return ParseStatus::Success;
}
3829
3830static const struct Extension {
3831 const char *Name;
3833} ExtensionMap[] = {
3834 {"crc", {AArch64::FeatureCRC}},
3835 {"sm4", {AArch64::FeatureSM4}},
3836 {"sha3", {AArch64::FeatureSHA3}},
3837 {"sha2", {AArch64::FeatureSHA2}},
3838 {"aes", {AArch64::FeatureAES}},
3839 {"crypto", {AArch64::FeatureCrypto}},
3840 {"fp", {AArch64::FeatureFPARMv8}},
3841 {"simd", {AArch64::FeatureNEON}},
3842 {"ras", {AArch64::FeatureRAS}},
3843 {"rasv2", {AArch64::FeatureRASv2}},
3844 {"lse", {AArch64::FeatureLSE}},
3845 {"predres", {AArch64::FeaturePredRes}},
3846 {"predres2", {AArch64::FeatureSPECRES2}},
3847 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3848 {"mte", {AArch64::FeatureMTE}},
3849 {"memtag", {AArch64::FeatureMTE}},
3850 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3851 {"pan", {AArch64::FeaturePAN}},
3852 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3853 {"ccpp", {AArch64::FeatureCCPP}},
3854 {"rcpc", {AArch64::FeatureRCPC}},
3855 {"rng", {AArch64::FeatureRandGen}},
3856 {"sve", {AArch64::FeatureSVE}},
3857 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3858 {"sve2", {AArch64::FeatureSVE2}},
3859 {"sve-aes", {AArch64::FeatureSVEAES}},
3860 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3861 {"sve-sm4", {AArch64::FeatureSVESM4}},
3862 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3863 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3864 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3865 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3866 {"sve2-bitperm",
3867 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3868 AArch64::FeatureSVE2}},
3869 {"sve2p1", {AArch64::FeatureSVE2p1}},
3870 {"ls64", {AArch64::FeatureLS64}},
3871 {"xs", {AArch64::FeatureXS}},
3872 {"pauth", {AArch64::FeaturePAuth}},
3873 {"flagm", {AArch64::FeatureFlagM}},
3874 {"rme", {AArch64::FeatureRME}},
3875 {"sme", {AArch64::FeatureSME}},
3876 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3877 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3878 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3879 {"sme2", {AArch64::FeatureSME2}},
3880 {"sme2p1", {AArch64::FeatureSME2p1}},
3881 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3882 {"hbc", {AArch64::FeatureHBC}},
3883 {"mops", {AArch64::FeatureMOPS}},
3884 {"mec", {AArch64::FeatureMEC}},
3885 {"the", {AArch64::FeatureTHE}},
3886 {"d128", {AArch64::FeatureD128}},
3887 {"lse128", {AArch64::FeatureLSE128}},
3888 {"ite", {AArch64::FeatureITE}},
3889 {"cssc", {AArch64::FeatureCSSC}},
3890 {"rcpc3", {AArch64::FeatureRCPC3}},
3891 {"gcs", {AArch64::FeatureGCS}},
3892 {"bf16", {AArch64::FeatureBF16}},
3893 {"compnum", {AArch64::FeatureComplxNum}},
3894 {"dotprod", {AArch64::FeatureDotProd}},
3895 {"f32mm", {AArch64::FeatureMatMulFP32}},
3896 {"f64mm", {AArch64::FeatureMatMulFP64}},
3897 {"fp16", {AArch64::FeatureFullFP16}},
3898 {"fp16fml", {AArch64::FeatureFP16FML}},
3899 {"i8mm", {AArch64::FeatureMatMulInt8}},
3900 {"lor", {AArch64::FeatureLOR}},
3901 {"profile", {AArch64::FeatureSPE}},
3902 // "rdma" is the name documented by binutils for the feature, but
3903 // binutils also accepts incomplete prefixes of features, so "rdm"
3904 // works too. Support both spellings here.
3905 {"rdm", {AArch64::FeatureRDM}},
3906 {"rdma", {AArch64::FeatureRDM}},
3907 {"sb", {AArch64::FeatureSB}},
3908 {"ssbs", {AArch64::FeatureSSBS}},
3909 {"fp8", {AArch64::FeatureFP8}},
3910 {"faminmax", {AArch64::FeatureFAMINMAX}},
3911 {"fp8fma", {AArch64::FeatureFP8FMA}},
3912 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3913 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3914 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3915 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3916 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3917 {"lut", {AArch64::FeatureLUT}},
3918 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3919 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3920 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3921 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3922 {"cpa", {AArch64::FeatureCPA}},
3923 {"tlbiw", {AArch64::FeatureTLBIW}},
3924 {"pops", {AArch64::FeaturePoPS}},
3925 {"cmpbr", {AArch64::FeatureCMPBR}},
3926 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3927 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3928 {"fprcvt", {AArch64::FeatureFPRCVT}},
3929 {"lsfe", {AArch64::FeatureLSFE}},
3930 {"sme2p2", {AArch64::FeatureSME2p2}},
3931 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3932 {"sve2p2", {AArch64::FeatureSVE2p2}},
3933 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3934 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3935 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3936 {"lsui", {AArch64::FeatureLSUI}},
3937 {"occmo", {AArch64::FeatureOCCMO}},
3938 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3939 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3940 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3941 {"lscp", {AArch64::FeatureLSCP}},
3942 {"tlbid", {AArch64::FeatureTLBID}},
3943 {"mtetc", {AArch64::FeatureMTETC}},
3944 {"gcie", {AArch64::FeatureGCIE}},
3945 {"sme2p3", {AArch64::FeatureSME2p3}},
3946 {"sve2p3", {AArch64::FeatureSVE2p3}},
3947 {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3948 {"f16mm", {AArch64::FeatureF16MM}},
3949 {"f16f32dot", {AArch64::FeatureF16F32DOT}},
3950 {"f16f32mm", {AArch64::FeatureF16F32MM}},
3951 {"mops-go", {AArch64::FeatureMOPS_GO}},
3952 {"poe2", {AArch64::FeatureS1POE2}},
3953 {"tev", {AArch64::FeatureTEV}},
3954 {"btie", {AArch64::FeatureBTIE}},
3955 {"dit", {AArch64::FeatureDIT}},
3956 {"brbe", {AArch64::FeatureBRBE}},
3957 {"bti", {AArch64::FeatureBranchTargetId}},
3958 {"fcma", {AArch64::FeatureComplxNum}},
3959 {"jscvt", {AArch64::FeatureJS}},
3960 {"pauth-lr", {AArch64::FeaturePAuthLR}},
3961 {"ssve-fexpa", {AArch64::FeatureSSVE_FEXPA}},
3962 {"wfxt", {AArch64::FeatureWFxT}},
3964
3965static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3966 if (FBS[AArch64::HasV8_0aOps])
3967 Str += "ARMv8a";
3968 if (FBS[AArch64::HasV8_1aOps])
3969 Str += "ARMv8.1a";
3970 else if (FBS[AArch64::HasV8_2aOps])
3971 Str += "ARMv8.2a";
3972 else if (FBS[AArch64::HasV8_3aOps])
3973 Str += "ARMv8.3a";
3974 else if (FBS[AArch64::HasV8_4aOps])
3975 Str += "ARMv8.4a";
3976 else if (FBS[AArch64::HasV8_5aOps])
3977 Str += "ARMv8.5a";
3978 else if (FBS[AArch64::HasV8_6aOps])
3979 Str += "ARMv8.6a";
3980 else if (FBS[AArch64::HasV8_7aOps])
3981 Str += "ARMv8.7a";
3982 else if (FBS[AArch64::HasV8_8aOps])
3983 Str += "ARMv8.8a";
3984 else if (FBS[AArch64::HasV8_9aOps])
3985 Str += "ARMv8.9a";
3986 else if (FBS[AArch64::HasV9_0aOps])
3987 Str += "ARMv9-a";
3988 else if (FBS[AArch64::HasV9_1aOps])
3989 Str += "ARMv9.1a";
3990 else if (FBS[AArch64::HasV9_2aOps])
3991 Str += "ARMv9.2a";
3992 else if (FBS[AArch64::HasV9_3aOps])
3993 Str += "ARMv9.3a";
3994 else if (FBS[AArch64::HasV9_4aOps])
3995 Str += "ARMv9.4a";
3996 else if (FBS[AArch64::HasV9_5aOps])
3997 Str += "ARMv9.5a";
3998 else if (FBS[AArch64::HasV9_6aOps])
3999 Str += "ARMv9.6a";
4000 else if (FBS[AArch64::HasV9_7aOps])
4001 Str += "ARMv9.7a";
4002 else if (FBS[AArch64::HasV8_0rOps])
4003 Str += "ARMv8r";
4004 else {
4005 SmallVector<std::string, 2> ExtMatches;
4006 for (const auto& Ext : ExtensionMap) {
4007 // Use & in case multiple features are enabled
4008 if ((FBS & Ext.Features) != FeatureBitset())
4009 ExtMatches.push_back(Ext.Name);
4010 }
4011 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
4012 }
4013}
4014
4015void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4016 SMLoc S) {
4017 const uint16_t Op2 = Encoding & 7;
4018 const uint16_t Cm = (Encoding & 0x78) >> 3;
4019 const uint16_t Cn = (Encoding & 0x780) >> 7;
4020 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4021
4022 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
4023
4024 Operands.push_back(
4025 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4026 Operands.push_back(
4027 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
4028 Operands.push_back(
4029 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
4030 Expr = MCConstantExpr::create(Op2, getContext());
4031 Operands.push_back(
4032 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4033}
4034
4035/// parseSysAlias - The IC, DC, AT, TLBI and GIC{R} and GSB instructions are
4036/// simple aliases for the SYS instruction. Parse them specially so that we
4037/// create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // A '.' in the mnemonic cannot be one of these aliases.
  if (Name.contains('.'))
    return TokError("invalid operand");

  // All of these aliases lower to a SYS instruction.
  Mnemonic = Name;
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  // The next token names the specific operation (e.g. the IC/DC/AT/TLBI op).
  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();
  // Whether a trailing Xt register operand is required / optional for the
  // selected operation; validated after parsing below.
  bool ExpectRegister = true;
  bool OptionalRegister = false;
  bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
  bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = IC->NeedsReg;
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = TLBI->RegUse == REG_REQUIRED;
    // With +all or +tlbid, some TLBI ops may take an optional register.
    if (hasAll || hasTLBID)
      OptionalRegister = TLBI->RegUse == REG_OPTIONAL;
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "gic") {
    const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
    if (!GIC)
      return TokError("invalid operand for GIC instruction");
    else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = GIC->NeedsReg;
    createSysAlias(GIC->Encoding, Operands, S);
  } else if (Mnemonic == "gsb") {
    const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
    if (!GSB)
      return TokError("invalid operand for GSB instruction");
    else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
      return TokError(Str);
    }
    // GSB never takes a register operand.
    ExpectRegister = false;
    createSysAlias(GSB->Encoding, Operands, S);
  } else if (Mnemonic == "plbi") {
    const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
    if (!PLBI)
      return TokError("invalid operand for PLBI instruction");
    else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = PLBI->RegUse == REG_REQUIRED;
    if (hasAll || hasTLBID)
      OptionalRegister = PLBI->RegUse == REG_OPTIONAL;
    createSysAlias(PLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
             Mnemonic == "cosp") {
    // Prediction restriction instructions (FEAT_SPECRES / FEAT_SPECRES2):
    // the only valid operand is RCTX.
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // Each mnemonic selects a distinct op2 value in the SYS encoding.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
                         : Mnemonic == "dvp" ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp" ? 0b111
                         : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  // Diagnose mismatches between the op's register requirements and what
  // was actually written (skipped when the register is optional).
  if (!OptionalRegister) {
    if (ExpectRegister && !HasRegister)
      return TokError("specified " + Mnemonic + " op requires a register");
    else if (!ExpectRegister && HasRegister)
      return TokError("specified " + Mnemonic + " op does not use a register");
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4183
4184/// parseSyslAlias - The GICR instructions are simple aliases for
4185/// the SYSL instruction. Parse them specially so that we create a
4186/// SYS MCInst.
bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {

  // These aliases lower to a SYSL instruction.
  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysl", NameLoc, getContext()));

  // Now expect two operands (identifier + register)
  // SYSL form: destination register first, then the operation name.
  SMLoc startLoc = getLoc();
  const AsmToken &regTok = getTok();
  StringRef reg = regTok.getString();
  MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
  if (!Reg)
    return TokError("expected register operand");

  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));

  Lex(); // Eat token
  if (parseToken(AsmToken::Comma))
    return true;

  // Check for identifier
  const AsmToken &operandTok = getTok();
  StringRef Op = operandTok.getString();
  SMLoc S2 = operandTok.getLoc();
  Lex(); // Eat token

  if (Mnemonic == "gicr") {
    const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
    if (!GICR)
      return Error(S2, "invalid operand for GICR instruction");
    else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
      return Error(S2, Str);
    }
    // Append the decoded op1/Cn/Cm/op2 operands for the SYSL encoding.
    createSysAlias(GICR->Encoding, Operands, S2);
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4232
4233/// parseSyspAlias - The TLBIP instructions are simple aliases for
4234/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4235bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4236 OperandVector &Operands) {
4237 if (Name.contains('.'))
4238 return TokError("invalid operand");
4239
4240 Mnemonic = Name;
4241 Operands.push_back(
4242 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4243
4244 const AsmToken &Tok = getTok();
4245 StringRef Op = Tok.getString();
4246 SMLoc S = Tok.getLoc();
4247
4248 if (Mnemonic == "tlbip") {
4249 const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Op);
4250 if (!TLBIP)
4251 return TokError("invalid operand for TLBIP instruction");
4252
4253 if (!TLBIP->haveFeatures(getSTI().getFeatureBits())) {
4254 std::string Str("instruction requires: ");
4255 Str += TLBIP->AllowWithTLBID ? "tlbid or d128" : "d128";
4256 return TokError(Str);
4257 }
4258 createSysAlias(TLBIP->Encoding, Operands, S);
4259 }
4260
4261 Lex(); // Eat operand.
4262
4263 if (parseComma())
4264 return true;
4265
4266 if (Tok.isNot(AsmToken::Identifier))
4267 return TokError("expected register identifier");
4268 auto Result = tryParseSyspXzrPair(Operands);
4269 if (Result.isNoMatch())
4270 Result = tryParseGPRSeqPair(Operands);
4271 if (!Result.isSuccess())
4272 return TokError("specified " + Mnemonic +
4273 " op requires a pair of registers");
4274
4275 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4276 return true;
4277
4278 return false;
4279}
4280
4281ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4282 MCAsmParser &Parser = getParser();
4283 const AsmToken &Tok = getTok();
4284
4285 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4286 return TokError("'csync' operand expected");
4287 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4288 // Immediate operand.
4289 const MCExpr *ImmVal;
4290 SMLoc ExprLoc = getLoc();
4291 AsmToken IntTok = Tok;
4292 if (getParser().parseExpression(ImmVal))
4293 return ParseStatus::Failure;
4294 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4295 if (!MCE)
4296 return Error(ExprLoc, "immediate value expected for barrier operand");
4297 int64_t Value = MCE->getValue();
4298 if (Mnemonic == "dsb" && Value > 15) {
4299 // This case is a no match here, but it might be matched by the nXS
4300 // variant. Deliberately not unlex the optional '#' as it is not necessary
4301 // to characterize an integer immediate.
4302 Parser.getLexer().UnLex(IntTok);
4303 return ParseStatus::NoMatch;
4304 }
4305 if (Value < 0 || Value > 15)
4306 return Error(ExprLoc, "barrier operand out of range");
4307 auto DB = AArch64DB::lookupDBByEncoding(Value);
4308 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4309 ExprLoc, getContext(),
4310 false /*hasnXSModifier*/));
4311 return ParseStatus::Success;
4312 }
4313
4314 if (Tok.isNot(AsmToken::Identifier))
4315 return TokError("invalid operand for instruction");
4316
4317 StringRef Operand = Tok.getString();
4318 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4319 auto DB = AArch64DB::lookupDBByName(Operand);
4320 // The only valid named option for ISB is 'sy'
4321 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4322 return TokError("'sy' or #imm operand expected");
4323 // The only valid named option for TSB is 'csync'
4324 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4325 return TokError("'csync' operand expected");
4326 if (!DB && !TSB) {
4327 if (Mnemonic == "dsb") {
4328 // This case is a no match here, but it might be matched by the nXS
4329 // variant.
4330 return ParseStatus::NoMatch;
4331 }
4332 return TokError("invalid barrier option name");
4333 }
4334
4335 Operands.push_back(AArch64Operand::CreateBarrier(
4336 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4337 getContext(), false /*hasnXSModifier*/));
4338 Lex(); // Consume the option
4339
4340 return ParseStatus::Success;
4341}
4342
4343ParseStatus
4344AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4345 const AsmToken &Tok = getTok();
4346
4347 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4348 if (Mnemonic != "dsb")
4349 return ParseStatus::Failure;
4350
4351 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4352 // Immediate operand.
4353 const MCExpr *ImmVal;
4354 SMLoc ExprLoc = getLoc();
4355 if (getParser().parseExpression(ImmVal))
4356 return ParseStatus::Failure;
4357 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4358 if (!MCE)
4359 return Error(ExprLoc, "immediate value expected for barrier operand");
4360 int64_t Value = MCE->getValue();
4361 // v8.7-A DSB in the nXS variant accepts only the following immediate
4362 // values: 16, 20, 24, 28.
4363 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4364 return Error(ExprLoc, "barrier operand out of range");
4365 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4366 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4367 ExprLoc, getContext(),
4368 true /*hasnXSModifier*/));
4369 return ParseStatus::Success;
4370 }
4371
4372 if (Tok.isNot(AsmToken::Identifier))
4373 return TokError("invalid operand for instruction");
4374
4375 StringRef Operand = Tok.getString();
4376 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4377
4378 if (!DB)
4379 return TokError("invalid barrier option name");
4380
4381 Operands.push_back(
4382 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4383 getContext(), true /*hasnXSModifier*/));
4384 Lex(); // Consume the option
4385
4386 return ParseStatus::Success;
4387}
4388
4389ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4390 const AsmToken &Tok = getTok();
4391
4392 if (Tok.isNot(AsmToken::Identifier))
4393 return ParseStatus::NoMatch;
4394
4395 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4396 return ParseStatus::NoMatch;
4397
4398 int MRSReg, MSRReg;
4399 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4400 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4401 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4402 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4403 } else
4404 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4405
4406 unsigned PStateImm = -1;
4407 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4408 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4409 PStateImm = PState15->Encoding;
4410 if (!PState15) {
4411 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4412 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4413 PStateImm = PState1->Encoding;
4414 }
4415
4416 Operands.push_back(
4417 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4418 PStateImm, getContext()));
4419 Lex(); // Eat identifier
4420
4421 return ParseStatus::Success;
4422}
4423
// Parse a named prediction-hint instruction operand.
ParseStatus
AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // PH is the hint-table entry for the identifier; null means the name is
  // not a recognized hint.
  if (!PH)
    return TokError("invalid operand for instruction");

  Operands.push_back(AArch64Operand::CreatePHintInst(
      PH->Encoding, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
4440
4441/// tryParseNeonVectorRegister - Parse a vector register operand.
4442bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4443 if (getTok().isNot(AsmToken::Identifier))
4444 return true;
4445
4446 SMLoc S = getLoc();
4447 // Check for a vector register specifier first.
4448 StringRef Kind;
4449 MCRegister Reg;
4450 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4451 if (!Res.isSuccess())
4452 return true;
4453
4454 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4455 if (!KindRes)
4456 return true;
4457
4458 unsigned ElementWidth = KindRes->second;
4459 Operands.push_back(
4460 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4461 S, getLoc(), getContext()));
4462
4463 // If there was an explicit qualifier, that goes on as a literal text
4464 // operand.
4465 if (!Kind.empty())
4466 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4467
4468 return tryParseVectorIndex(Operands).isFailure();
4469}
4470
4471ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4472 SMLoc SIdx = getLoc();
4473 if (parseOptionalToken(AsmToken::LBrac)) {
4474 const MCExpr *ImmVal;
4475 if (getParser().parseExpression(ImmVal))
4476 return ParseStatus::NoMatch;
4477 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4478 if (!MCE)
4479 return TokError("immediate value expected for vector index");
4480
4481 SMLoc E = getLoc();
4482
4483 if (parseToken(AsmToken::RBrac, "']' expected"))
4484 return ParseStatus::Failure;
4485
4486 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4487 E, getContext()));
4488 return ParseStatus::Success;
4489 }
4490
4491 return ParseStatus::NoMatch;
4492}
4493
4494// tryParseVectorRegister - Try to parse a vector register name with
4495// optional kind specifier. If it is a register specifier, eat the token
4496// and return it.
4497ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4498 StringRef &Kind,
4499 RegKind MatchKind) {
4500 const AsmToken &Tok = getTok();
4501
4502 if (Tok.isNot(AsmToken::Identifier))
4503 return ParseStatus::NoMatch;
4504
4505 StringRef Name = Tok.getString();
4506 // If there is a kind specifier, it's separated from the register name by
4507 // a '.'.
4508 size_t Start = 0, Next = Name.find('.');
4509 StringRef Head = Name.slice(Start, Next);
4510 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4511
4512 if (RegNum) {
4513 if (Next != StringRef::npos) {
4514 Kind = Name.substr(Next);
4515 if (!isValidVectorKind(Kind, MatchKind))
4516 return TokError("invalid vector kind qualifier");
4517 }
4518 Lex(); // Eat the register token.
4519
4520 Reg = RegNum;
4521 return ParseStatus::Success;
4522 }
4523
4524 return ParseStatus::NoMatch;
4525}
4526
4527ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4528 OperandVector &Operands) {
4529 ParseStatus Status =
4530 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4531 if (!Status.isSuccess())
4532 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4533 return Status;
4534}
4535
4536/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4537template <RegKind RK>
4538ParseStatus
4539AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4540 // Check for a SVE predicate register specifier first.
4541 const SMLoc S = getLoc();
4542 StringRef Kind;
4543 MCRegister RegNum;
4544 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4545 if (!Res.isSuccess())
4546 return Res;
4547
4548 const auto &KindRes = parseVectorKind(Kind, RK);
4549 if (!KindRes)
4550 return ParseStatus::NoMatch;
4551
4552 unsigned ElementWidth = KindRes->second;
4553 Operands.push_back(AArch64Operand::CreateVectorReg(
4554 RegNum, RK, ElementWidth, S,
4555 getLoc(), getContext()));
4556
4557 if (getLexer().is(AsmToken::LBrac)) {
4558 if (RK == RegKind::SVEPredicateAsCounter) {
4559 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4560 if (ResIndex.isSuccess())
4561 return ParseStatus::Success;
4562 } else {
4563 // Indexed predicate, there's no comma so try parse the next operand
4564 // immediately.
4565 if (parseOperand(Operands, false, false))
4566 return ParseStatus::NoMatch;
4567 }
4568 }
4569
4570 // Not all predicates are followed by a '/m' or '/z'.
4571 if (getTok().isNot(AsmToken::Slash))
4572 return ParseStatus::Success;
4573
4574 // But when they do they shouldn't have an element type suffix.
4575 if (!Kind.empty())
4576 return Error(S, "not expecting size suffix");
4577
4578 // Add a literal slash as operand
4579 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4580
4581 Lex(); // Eat the slash.
4582
4583 // Zeroing or merging?
4584 auto Pred = getTok().getString().lower();
4585 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4586 return Error(getLoc(), "expecting 'z' predication");
4587
4588 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4589 return Error(getLoc(), "expecting 'm' or 'z' predication");
4590
4591 // Add zero/merge token.
4592 const char *ZM = Pred == "z" ? "z" : "m";
4593 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4594
4595 Lex(); // Eat zero/merge token.
4596 return ParseStatus::Success;
4597}
4598
4599/// parseRegister - Parse a register operand.
4600bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4601 // Try for a Neon vector register.
4602 if (!tryParseNeonVectorRegister(Operands))
4603 return false;
4604
4605 if (tryParseZTOperand(Operands).isSuccess())
4606 return false;
4607
4608 // Otherwise try for a scalar register.
4609 if (tryParseGPROperand<false>(Operands).isSuccess())
4610 return false;
4611
4612 return true;
4613}
4614
// Parse an immediate expression, optionally prefixed by an ELF-style
// ':specifier:' relocation modifier (e.g. ":lo12:sym"); when present, the
// parsed expression is wrapped in an MCSpecifierExpr. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case("lo12", AArch64::S_LO12)
                  .Case("abs_g3", AArch64::S_ABS_G3)
                  .Case("abs_g2", AArch64::S_ABS_G2)
                  .Case("abs_g2_s", AArch64::S_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
                  .Case("abs_g1", AArch64::S_ABS_G1)
                  .Case("abs_g1_s", AArch64::S_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
                  .Case("abs_g0", AArch64::S_ABS_G0)
                  .Case("abs_g0_s", AArch64::S_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
                  .Case("prel_g3", AArch64::S_PREL_G3)
                  .Case("prel_g2", AArch64::S_PREL_G2)
                  .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
                  .Case("prel_g1", AArch64::S_PREL_G1)
                  .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
                  .Case("prel_g0", AArch64::S_PREL_G0)
                  .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
                  .Case("dtprel", AArch64::S_DTPREL)
                  .Case("dtprel_g2", AArch64::S_DTPREL_G2)
                  .Case("dtprel_g1", AArch64::S_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64::S_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64::S_TPREL_G2)
                  .Case("tprel_g1", AArch64::S_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64::S_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64::S_TPREL_HI12)
                  .Case("tprel_lo12", AArch64::S_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
                  .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
                  .Case("got", AArch64::S_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64::S_GOT_LO12)
                  .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
                  .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
                  .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
                  .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case("secrel_lo12", AArch64::S_SECREL_LO12)
                  .Case("secrel_hi12", AArch64::S_SECREL_HI12)
                  .Default(AArch64::S_INVALID);

    if (RefKind == AArch64::S_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);

  SMLoc EndLoc;
  // Targets using subsections-via-symbols also accept an '@specifier'
  // suffix and an optional '+'/'-' constant addend.
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(ImmVal, EndLoc))
      return true;
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
  }

  return false;
}
4714
// Parse a '{...}' list of SME matrix tiles (e.g. "{za0.s, za2.s}"), the
// empty list "{}", or the "{za}" alias for the whole array. The result is
// an operand carrying a bitmask of the covered ZAD registers.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse one "ZAn.<size>" tile, producing its register and element width.
  // Only consumes the token on success.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes)
      return TokError(
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return ParseStatus::Failure;

    // 0xFF selects all eight ZAD tile bits.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Put the '{' back so a different list parser can have a go.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers covered by each named tile;
  // SeenRegs tracks already-parsed tiles for the duplicate warning.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");

    // Out-of-order and duplicate tiles are warnings, not errors.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  // Encode the collected ZAD registers as a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return ParseStatus::Success;
}
4820
/// Parse a brace-enclosed vector register list, e.g. "{ v0.4s, v1.4s }",
/// the range form "{ z0.d - z2.d }", or the strided SME form
/// "{ z0.b, z4.b, z8.b, z12.b }". Emits a vector-list operand (plus an
/// optional trailing vector index). On NoMatch the '{' is pushed back so a
/// different list kind (e.g. SVE vs. Neon) can be tried by the caller.
/// \param ExpectMatch when true, contents that are not a VectorKind list are
///        diagnosed as an error instead of returning NoMatch.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" is handled by a different parser; never diagnose it here.
    if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive("zt0"))
      return ParseStatus::NoMatch;

    if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");

    return ParseStatus::NoMatch;
  };

  unsigned NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  // Range form: "{ vN.T - vM.T }" adds (M - N) mod NumRegs registers.
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated form: the stride is established by the first pair and
    // every subsequent register must continue it (modulo NumRegs).
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  // An immediately-following '[' is a vector index applied to the list.
  if (getTok().is(AsmToken::LBrac)) {
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4951
4952/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4953bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4954 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4955 if (!ParseRes.isSuccess())
4956 return true;
4957
4958 return tryParseVectorIndex(Operands).isFailure();
4959}
4960
4961ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4962 SMLoc StartLoc = getLoc();
4963
4964 MCRegister RegNum;
4965 ParseStatus Res = tryParseScalarRegister(RegNum);
4966 if (!Res.isSuccess())
4967 return Res;
4968
4969 if (!parseOptionalToken(AsmToken::Comma)) {
4970 Operands.push_back(AArch64Operand::CreateReg(
4971 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4972 return ParseStatus::Success;
4973 }
4974
4975 parseOptionalToken(AsmToken::Hash);
4976
4977 if (getTok().isNot(AsmToken::Integer))
4978 return Error(getLoc(), "index must be absent or #0");
4979
4980 const MCExpr *ImmVal;
4981 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4982 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4983 return Error(getLoc(), "index must be absent or #0");
4984
4985 Operands.push_back(AArch64Operand::CreateReg(
4986 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4987 return ParseStatus::Success;
4988}
4989
/// Try to parse a ZT0 lookup-table register, optionally followed by a
/// bracketed immediate index and "mul" decoration, e.g. "zt0[2, mul vl]".
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (!Reg)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    const MCExpr *ImmVal;
    // NOTE(review): tokens (register and '[') have already been consumed and
    // pushed, yet a bad expression yields NoMatch rather than Failure —
    // confirm callers treat this as a hard failure.
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");
    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
    // Allow an optional "mul vl" / "mul #<imm>" decoration inside brackets.
    if (parseOptionalToken(AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        AArch64Operand::CreateToken("]", getLoc(), getContext()));
  }
  return ParseStatus::Success;
}
5027
5028template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5029ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5030 SMLoc StartLoc = getLoc();
5031
5032 MCRegister RegNum;
5033 ParseStatus Res = tryParseScalarRegister(RegNum);
5034 if (!Res.isSuccess())
5035 return Res;
5036
5037 // No shift/extend is the default.
5038 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
5039 Operands.push_back(AArch64Operand::CreateReg(
5040 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
5041 return ParseStatus::Success;
5042 }
5043
5044 // Eat the comma
5045 Lex();
5046
5047 // Match the shift
5049 Res = tryParseOptionalShiftExtend(ExtOpnd);
5050 if (!Res.isSuccess())
5051 return Res;
5052
5053 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5054 Operands.push_back(AArch64Operand::CreateReg(
5055 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5056 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5057 Ext->hasShiftExtendAmount()));
5058
5059 return ParseStatus::Success;
5060}
5061
5062bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5063 MCAsmParser &Parser = getParser();
5064
5065 // Some SVE instructions have a decoration after the immediate, i.e.
5066 // "mul vl". We parse them here and add tokens, which must be present in the
5067 // asm string in the tablegen instruction.
5068 bool NextIsVL =
5069 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
5070 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
5071 if (!getTok().getString().equals_insensitive("mul") ||
5072 !(NextIsVL || NextIsHash))
5073 return true;
5074
5075 Operands.push_back(
5076 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
5077 Lex(); // Eat the "mul"
5078
5079 if (NextIsVL) {
5080 Operands.push_back(
5081 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
5082 Lex(); // Eat the "vl"
5083 return false;
5084 }
5085
5086 if (NextIsHash) {
5087 Lex(); // Eat the #
5088 SMLoc S = getLoc();
5089
5090 // Parse immediate operand.
5091 const MCExpr *ImmVal;
5092 if (!Parser.parseExpression(ImmVal))
5093 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
5094 Operands.push_back(AArch64Operand::CreateImm(
5095 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
5096 getContext()));
5097 return false;
5098 }
5099 }
5100
5101 return Error(getLoc(), "expected 'vl' or '#<imm>'");
5102}
5103
5104bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5105 StringRef &VecGroup) {
5106 MCAsmParser &Parser = getParser();
5107 auto Tok = Parser.getTok();
5108 if (Tok.isNot(AsmToken::Identifier))
5109 return true;
5110
5111 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5112 .Case("vgx2", "vgx2")
5113 .Case("vgx4", "vgx4")
5114 .Default("");
5115
5116 if (VG.empty())
5117 return true;
5118
5119 VecGroup = VG;
5120 Parser.Lex(); // Eat vgx[2|4]
5121 return false;
5122}
5123
5124bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5125 auto Tok = getTok();
5126 if (Tok.isNot(AsmToken::Identifier))
5127 return true;
5128
5129 auto Keyword = Tok.getString();
5130 Keyword = StringSwitch<StringRef>(Keyword.lower())
5131 .Case("sm", "sm")
5132 .Case("za", "za")
5133 .Default(Keyword);
5134 Operands.push_back(
5135 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5136
5137 Lex();
5138 return false;
5139}
5140
/// parseOperand - Parse a single AArch64 instruction operand. For now this
/// parses the operand regardless of the mnemonic.
///
/// \param isCondCode      true when this operand position expects a condition
///                        code (e.g. the final operand of ccmp/csel).
/// \param invertCondCode  true when the parsed condition code must be
///                        inverted (cset/cinc-style aliases).
/// \returns true on error (a diagnostic has been emitted), false on success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First try the custom, tablegen-generated operand parsers.
  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: consume an optional ", <shift|extend>" after an operand. If the
  // comma is not followed by a shift/extend, push the comma back (SavedTok)
  // so it can separate the next operand instead.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else: try a symbolic immediate (labels, relocation specifiers).
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // A '{' starts either a NEON vector list or an SME tile/ZA group.
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        getLexer().UnLex(SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    if (getParser().parseAtSpecifier(IdVal, E))
      return true;
    // Allow a single trailing "+ expr" / "- expr" addend after a specifier.
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Term, E))
        return true;
      IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
    }
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit as two raw tokens, matching the asm string in tablegen.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    // "ldr <reg>, =<expr>" pseudo: materialize via movz or a constant pool.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Normalize to a 16-bit value plus an LSL shift in steps of 16.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
            ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5366
5367bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5368 const MCExpr *Expr = nullptr;
5369 SMLoc L = getLoc();
5370 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5371 return true;
5372 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5373 if (check(!Value, L, "expected constant expression"))
5374 return true;
5375 Out = Value->getValue();
5376 return false;
5377}
5378
5379bool AArch64AsmParser::parseComma() {
5380 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5381 return true;
5382 // Eat the comma
5383 Lex();
5384 return false;
5385}
5386
5387bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5388 unsigned First, unsigned Last) {
5389 MCRegister Reg;
5390 SMLoc Start, End;
5391 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5392 return true;
5393
5394 // Special handling for FP and LR; they aren't linearly after x28 in
5395 // the registers enum.
5396 unsigned RangeEnd = Last;
5397 if (Base == AArch64::X0) {
5398 if (Last == AArch64::FP) {
5399 RangeEnd = AArch64::X28;
5400 if (Reg == AArch64::FP) {
5401 Out = 29;
5402 return false;
5403 }
5404 }
5405 if (Last == AArch64::LR) {
5406 RangeEnd = AArch64::X28;
5407 if (Reg == AArch64::FP) {
5408 Out = 29;
5409 return false;
5410 } else if (Reg == AArch64::LR) {
5411 Out = 30;
5412 return false;
5413 }
5414 }
5415 }
5416
5417 if (check(Reg < First || Reg > RangeEnd, Start,
5418 Twine("expected register in range ") +
5421 return true;
5422 Out = Reg - Base;
5423 return false;
5424}
5425
5426bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5427 const MCParsedAsmOperand &Op2) const {
5428 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5429 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5430
5431 if (AOp1.isVectorList() && AOp2.isVectorList())
5432 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5433 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5434 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5435
5436 if (!AOp1.isReg() || !AOp2.isReg())
5437 return false;
5438
5439 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5440 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5441 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5442
5443 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5444 "Testing equality of non-scalar registers not supported");
5445
5446 // Check if a registers match their sub/super register classes.
5447 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5448 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5449 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5450 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5451 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5452 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5453 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5454 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5455
5456 return false;
5457}
5458
/// Parse an AArch64 instruction mnemonic followed by its operands.
/// Handles: condensed branch aliases ("beq" -> "b.eq"), the ".req"
/// directive, SYS/SYSL/SYSP-alias mnemonics, splitting the mnemonic at '.'
/// into suffix tokens, and the operand list itself.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the condensed conditional-branch spellings to "b.<cc>".
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5611
5612static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5613 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5614 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5615 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5616 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5617 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5618 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5619 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5620}
5621
5622static bool isMovPrfxable(unsigned TSFlags) {
5623 unsigned Flags = TSFlags & AArch64::DestructiveInstTypeMask;
5624 return Flags != AArch64::NotDestructive &&
5626}
5627
5628// FIXME: This entire function is a giant hack to provide us with decent
5629// operand range validation/diagnostics until TableGen/MC can be extended
5630// to support autogeneration of this kind of validation.
5631bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5632 SmallVectorImpl<SMLoc> &Loc) {
5633 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5634 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5635
5636 // A prefix only applies to the instruction following it. Here we extract
5637 // prefix information for the next instruction before validating the current
5638 // one so that in the case of failure we don't erroneously continue using the
5639 // current prefix.
5640 PrefixInfo Prefix = NextPrefix;
5641 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5642
5643 // Before validating the instruction in isolation we run through the rules
5644 // applicable when it follows a prefix instruction.
5645 // NOTE: brk & hlt can be prefixed but require no additional validation.
5646 if (Prefix.isActive() &&
5647 (Inst.getOpcode() != AArch64::BRK) &&
5648 (Inst.getOpcode() != AArch64::HLT)) {
5649
5650 // Prefixed instructions must have a destructive operand.
5651 if (!isMovPrfxable(MCID.TSFlags))
5652 return Error(IDLoc, "instruction is unpredictable when following a"
5653 " movprfx, suggest replacing movprfx with mov");
5654
5655 // Destination operands must match.
5656 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5657 return Error(Loc[0], "instruction is unpredictable when following a"
5658 " movprfx writing to a different destination");
5659
5660 // Destination operand must not be used in any other location.
5661 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5662 if (Inst.getOperand(i).isReg() &&
5663 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5664 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5665 return Error(Loc[0], "instruction is unpredictable when following a"
5666 " movprfx and destination also used as non-destructive"
5667 " source");
5668 }
5669
5670 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5671 if (Prefix.isPredicated()) {
5672 int PgIdx = -1;
5673
5674 // Find the instructions general predicate.
5675 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5676 if (Inst.getOperand(i).isReg() &&
5677 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5678 PgIdx = i;
5679 break;
5680 }
5681
5682 // Instruction must be predicated if the movprfx is predicated.
5683 if (PgIdx == -1 ||
5685 return Error(IDLoc, "instruction is unpredictable when following a"
5686 " predicated movprfx, suggest using unpredicated movprfx");
5687
5688 // Instruction must use same general predicate as the movprfx.
5689 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5690 return Error(IDLoc, "instruction is unpredictable when following a"
5691 " predicated movprfx using a different general predicate");
5692
5693 // Instruction element type must match the movprfx.
5694 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5695 return Error(IDLoc, "instruction is unpredictable when following a"
5696 " predicated movprfx with a different element size");
5697 }
5698 }
5699
5700 // On ARM64EC, only valid registers may be used. Warn against using
5701 // explicitly disallowed registers.
5702 if (IsWindowsArm64EC) {
5703 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5704 if (Inst.getOperand(i).isReg()) {
5705 MCRegister Reg = Inst.getOperand(i).getReg();
5706 // At this point, vector registers are matched to their
5707 // appropriately sized alias.
5708 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5709 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5710 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5711 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5712 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5713 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5714 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5715 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5716 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5717 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5718 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5719 " is disallowed on ARM64EC.");
5720 }
5721 }
5722 }
5723 }
5724
5725 // Check for indexed addressing modes w/ the base register being the
5726 // same as a destination/source register or pair load where
5727 // the Rt == Rt2. All of those are undefined behaviour.
5728 switch (Inst.getOpcode()) {
5729 case AArch64::LDPSWpre:
5730 case AArch64::LDPWpost:
5731 case AArch64::LDPWpre:
5732 case AArch64::LDPXpost:
5733 case AArch64::LDPXpre: {
5734 MCRegister Rt = Inst.getOperand(1).getReg();
5735 MCRegister Rt2 = Inst.getOperand(2).getReg();
5736 MCRegister Rn = Inst.getOperand(3).getReg();
5737 if (RI->isSubRegisterEq(Rn, Rt))
5738 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5739 "is also a destination");
5740 if (RI->isSubRegisterEq(Rn, Rt2))
5741 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5742 "is also a destination");
5743 [[fallthrough]];
5744 }
5745 case AArch64::LDR_ZA:
5746 case AArch64::STR_ZA: {
5747 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5748 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5749 return Error(Loc[1],
5750 "unpredictable instruction, immediate and offset mismatch.");
5751 break;
5752 }
5753 case AArch64::LDPDi:
5754 case AArch64::LDPQi:
5755 case AArch64::LDPSi:
5756 case AArch64::LDPSWi:
5757 case AArch64::LDPWi:
5758 case AArch64::LDPXi: {
5759 MCRegister Rt = Inst.getOperand(0).getReg();
5760 MCRegister Rt2 = Inst.getOperand(1).getReg();
5761 if (Rt == Rt2)
5762 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5763 break;
5764 }
5765 case AArch64::LDPDpost:
5766 case AArch64::LDPDpre:
5767 case AArch64::LDPQpost:
5768 case AArch64::LDPQpre:
5769 case AArch64::LDPSpost:
5770 case AArch64::LDPSpre:
5771 case AArch64::LDPSWpost: {
5772 MCRegister Rt = Inst.getOperand(1).getReg();
5773 MCRegister Rt2 = Inst.getOperand(2).getReg();
5774 if (Rt == Rt2)
5775 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5776 break;
5777 }
5778 case AArch64::STPDpost:
5779 case AArch64::STPDpre:
5780 case AArch64::STPQpost:
5781 case AArch64::STPQpre:
5782 case AArch64::STPSpost:
5783 case AArch64::STPSpre:
5784 case AArch64::STPWpost:
5785 case AArch64::STPWpre:
5786 case AArch64::STPXpost:
5787 case AArch64::STPXpre: {
5788 MCRegister Rt = Inst.getOperand(1).getReg();
5789 MCRegister Rt2 = Inst.getOperand(2).getReg();
5790 MCRegister Rn = Inst.getOperand(3).getReg();
5791 if (RI->isSubRegisterEq(Rn, Rt))
5792 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5793 "is also a source");
5794 if (RI->isSubRegisterEq(Rn, Rt2))
5795 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5796 "is also a source");
5797 break;
5798 }
5799 case AArch64::LDRBBpre:
5800 case AArch64::LDRBpre:
5801 case AArch64::LDRHHpre:
5802 case AArch64::LDRHpre:
5803 case AArch64::LDRSBWpre:
5804 case AArch64::LDRSBXpre:
5805 case AArch64::LDRSHWpre:
5806 case AArch64::LDRSHXpre:
5807 case AArch64::LDRSWpre:
5808 case AArch64::LDRWpre:
5809 case AArch64::LDRXpre:
5810 case AArch64::LDRBBpost:
5811 case AArch64::LDRBpost:
5812 case AArch64::LDRHHpost:
5813 case AArch64::LDRHpost:
5814 case AArch64::LDRSBWpost:
5815 case AArch64::LDRSBXpost:
5816 case AArch64::LDRSHWpost:
5817 case AArch64::LDRSHXpost:
5818 case AArch64::LDRSWpost:
5819 case AArch64::LDRWpost:
5820 case AArch64::LDRXpost: {
5821 MCRegister Rt = Inst.getOperand(1).getReg();
5822 MCRegister Rn = Inst.getOperand(2).getReg();
5823 if (RI->isSubRegisterEq(Rn, Rt))
5824 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5825 "is also a source");
5826 break;
5827 }
5828 case AArch64::STRBBpost:
5829 case AArch64::STRBpost:
5830 case AArch64::STRHHpost:
5831 case AArch64::STRHpost:
5832 case AArch64::STRWpost:
5833 case AArch64::STRXpost:
5834 case AArch64::STRBBpre:
5835 case AArch64::STRBpre:
5836 case AArch64::STRHHpre:
5837 case AArch64::STRHpre:
5838 case AArch64::STRWpre:
5839 case AArch64::STRXpre: {
5840 MCRegister Rt = Inst.getOperand(1).getReg();
5841 MCRegister Rn = Inst.getOperand(2).getReg();
5842 if (RI->isSubRegisterEq(Rn, Rt))
5843 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5844 "is also a source");
5845 break;
5846 }
5847 case AArch64::STXRB:
5848 case AArch64::STXRH:
5849 case AArch64::STXRW:
5850 case AArch64::STXRX:
5851 case AArch64::STLXRB:
5852 case AArch64::STLXRH:
5853 case AArch64::STLXRW:
5854 case AArch64::STLXRX: {
5855 MCRegister Rs = Inst.getOperand(0).getReg();
5856 MCRegister Rt = Inst.getOperand(1).getReg();
5857 MCRegister Rn = Inst.getOperand(2).getReg();
5858 if (RI->isSubRegisterEq(Rt, Rs) ||
5859 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5860 return Error(Loc[0],
5861 "unpredictable STXR instruction, status is also a source");
5862 break;
5863 }
5864 case AArch64::STXPW:
5865 case AArch64::STXPX:
5866 case AArch64::STLXPW:
5867 case AArch64::STLXPX: {
5868 MCRegister Rs = Inst.getOperand(0).getReg();
5869 MCRegister Rt1 = Inst.getOperand(1).getReg();
5870 MCRegister Rt2 = Inst.getOperand(2).getReg();
5871 MCRegister Rn = Inst.getOperand(3).getReg();
5872 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5873 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5874 return Error(Loc[0],
5875 "unpredictable STXP instruction, status is also a source");
5876 break;
5877 }
5878 case AArch64::LDRABwriteback:
5879 case AArch64::LDRAAwriteback: {
5880 MCRegister Xt = Inst.getOperand(0).getReg();
5881 MCRegister Xn = Inst.getOperand(1).getReg();
5882 if (Xt == Xn)
5883 return Error(Loc[0],
5884 "unpredictable LDRA instruction, writeback base"
5885 " is also a destination");
5886 break;
5887 }
5888 }
5889
5890 // Check v8.8-A memops instructions.
5891 switch (Inst.getOpcode()) {
5892 case AArch64::CPYFP:
5893 case AArch64::CPYFPWN:
5894 case AArch64::CPYFPRN:
5895 case AArch64::CPYFPN:
5896 case AArch64::CPYFPWT:
5897 case AArch64::CPYFPWTWN:
5898 case AArch64::CPYFPWTRN:
5899 case AArch64::CPYFPWTN:
5900 case AArch64::CPYFPRT:
5901 case AArch64::CPYFPRTWN:
5902 case AArch64::CPYFPRTRN:
5903 case AArch64::CPYFPRTN:
5904 case AArch64::CPYFPT:
5905 case AArch64::CPYFPTWN:
5906 case AArch64::CPYFPTRN:
5907 case AArch64::CPYFPTN:
5908 case AArch64::CPYFM:
5909 case AArch64::CPYFMWN:
5910 case AArch64::CPYFMRN:
5911 case AArch64::CPYFMN:
5912 case AArch64::CPYFMWT:
5913 case AArch64::CPYFMWTWN:
5914 case AArch64::CPYFMWTRN:
5915 case AArch64::CPYFMWTN:
5916 case AArch64::CPYFMRT:
5917 case AArch64::CPYFMRTWN:
5918 case AArch64::CPYFMRTRN:
5919 case AArch64::CPYFMRTN:
5920 case AArch64::CPYFMT:
5921 case AArch64::CPYFMTWN:
5922 case AArch64::CPYFMTRN:
5923 case AArch64::CPYFMTN:
5924 case AArch64::CPYFE:
5925 case AArch64::CPYFEWN:
5926 case AArch64::CPYFERN:
5927 case AArch64::CPYFEN:
5928 case AArch64::CPYFEWT:
5929 case AArch64::CPYFEWTWN:
5930 case AArch64::CPYFEWTRN:
5931 case AArch64::CPYFEWTN:
5932 case AArch64::CPYFERT:
5933 case AArch64::CPYFERTWN:
5934 case AArch64::CPYFERTRN:
5935 case AArch64::CPYFERTN:
5936 case AArch64::CPYFET:
5937 case AArch64::CPYFETWN:
5938 case AArch64::CPYFETRN:
5939 case AArch64::CPYFETN:
5940 case AArch64::CPYP:
5941 case AArch64::CPYPWN:
5942 case AArch64::CPYPRN:
5943 case AArch64::CPYPN:
5944 case AArch64::CPYPWT:
5945 case AArch64::CPYPWTWN:
5946 case AArch64::CPYPWTRN:
5947 case AArch64::CPYPWTN:
5948 case AArch64::CPYPRT:
5949 case AArch64::CPYPRTWN:
5950 case AArch64::CPYPRTRN:
5951 case AArch64::CPYPRTN:
5952 case AArch64::CPYPT:
5953 case AArch64::CPYPTWN:
5954 case AArch64::CPYPTRN:
5955 case AArch64::CPYPTN:
5956 case AArch64::CPYM:
5957 case AArch64::CPYMWN:
5958 case AArch64::CPYMRN:
5959 case AArch64::CPYMN:
5960 case AArch64::CPYMWT:
5961 case AArch64::CPYMWTWN:
5962 case AArch64::CPYMWTRN:
5963 case AArch64::CPYMWTN:
5964 case AArch64::CPYMRT:
5965 case AArch64::CPYMRTWN:
5966 case AArch64::CPYMRTRN:
5967 case AArch64::CPYMRTN:
5968 case AArch64::CPYMT:
5969 case AArch64::CPYMTWN:
5970 case AArch64::CPYMTRN:
5971 case AArch64::CPYMTN:
5972 case AArch64::CPYE:
5973 case AArch64::CPYEWN:
5974 case AArch64::CPYERN:
5975 case AArch64::CPYEN:
5976 case AArch64::CPYEWT:
5977 case AArch64::CPYEWTWN:
5978 case AArch64::CPYEWTRN:
5979 case AArch64::CPYEWTN:
5980 case AArch64::CPYERT:
5981 case AArch64::CPYERTWN:
5982 case AArch64::CPYERTRN:
5983 case AArch64::CPYERTN:
5984 case AArch64::CPYET:
5985 case AArch64::CPYETWN:
5986 case AArch64::CPYETRN:
5987 case AArch64::CPYETN: {
5988 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
5989 MCRegister Xd = Inst.getOperand(3).getReg();
5990 MCRegister Xs = Inst.getOperand(4).getReg();
5991 MCRegister Xn = Inst.getOperand(5).getReg();
5992
5993 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
5994 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
5995 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
5996
5997 if (Xd == Xs)
5998 return Error(Loc[0], "invalid CPY instruction, destination and source"
5999 " registers are the same");
6000 if (Xd == Xn)
6001 return Error(Loc[0], "invalid CPY instruction, destination and size"
6002 " registers are the same");
6003 if (Xs == Xn)
6004 return Error(Loc[0], "invalid CPY instruction, source and size"
6005 " registers are the same");
6006 break;
6007 }
6008 case AArch64::SETP:
6009 case AArch64::SETPT:
6010 case AArch64::SETPN:
6011 case AArch64::SETPTN:
6012 case AArch64::SETM:
6013 case AArch64::SETMT:
6014 case AArch64::SETMN:
6015 case AArch64::SETMTN:
6016 case AArch64::SETE:
6017 case AArch64::SETET:
6018 case AArch64::SETEN:
6019 case AArch64::SETETN:
6020 case AArch64::SETGP:
6021 case AArch64::SETGPT:
6022 case AArch64::SETGPN:
6023 case AArch64::SETGPTN:
6024 case AArch64::SETGM:
6025 case AArch64::SETGMT:
6026 case AArch64::SETGMN:
6027 case AArch64::SETGMTN:
6028 case AArch64::MOPSSETGE:
6029 case AArch64::MOPSSETGET:
6030 case AArch64::MOPSSETGEN:
6031 case AArch64::MOPSSETGETN: {
6032 // Xd_wb == op0, Xn_wb == op1
6033 MCRegister Xd = Inst.getOperand(2).getReg();
6034 MCRegister Xn = Inst.getOperand(3).getReg();
6035 MCRegister Xm = Inst.getOperand(4).getReg();
6036
6037 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6038 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6039
6040 if (Xd == Xn)
6041 return Error(Loc[0], "invalid SET instruction, destination and size"
6042 " registers are the same");
6043 if (Xd == Xm)
6044 return Error(Loc[0], "invalid SET instruction, destination and source"
6045 " registers are the same");
6046 if (Xn == Xm)
6047 return Error(Loc[0], "invalid SET instruction, source and size"
6048 " registers are the same");
6049 break;
6050 }
6051 case AArch64::SETGOP:
6052 case AArch64::SETGOPT:
6053 case AArch64::SETGOPN:
6054 case AArch64::SETGOPTN:
6055 case AArch64::SETGOM:
6056 case AArch64::SETGOMT:
6057 case AArch64::SETGOMN:
6058 case AArch64::SETGOMTN:
6059 case AArch64::SETGOE:
6060 case AArch64::SETGOET:
6061 case AArch64::SETGOEN:
6062 case AArch64::SETGOETN: {
6063 // Xd_wb == op0, Xn_wb == op1
6064 MCRegister Xd = Inst.getOperand(2).getReg();
6065 MCRegister Xn = Inst.getOperand(3).getReg();
6066
6067 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6068 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6069
6070 if (Xd == Xn)
6071 return Error(Loc[0], "invalid SET instruction, destination and size"
6072 " registers are the same");
6073 break;
6074 }
6075 }
6076
6077 // Now check immediate ranges. Separate from the above as there is overlap
6078 // in the instructions being checked and this keeps the nested conditionals
6079 // to a minimum.
6080 switch (Inst.getOpcode()) {
6081 case AArch64::ADDSWri:
6082 case AArch64::ADDSXri:
6083 case AArch64::ADDWri:
6084 case AArch64::ADDXri:
6085 case AArch64::SUBSWri:
6086 case AArch64::SUBSXri:
6087 case AArch64::SUBWri:
6088 case AArch64::SUBXri: {
6089 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6090 // some slight duplication here.
6091 if (Inst.getOperand(2).isExpr()) {
6092 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6093 AArch64::Specifier ELFSpec;
6094 AArch64::Specifier DarwinSpec;
6095 int64_t Addend;
6096 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6097
6098 // Only allow these with ADDXri.
6099 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6100 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6101 Inst.getOpcode() == AArch64::ADDXri)
6102 return false;
6103
6104 // Only allow these with ADDXri/ADDWri
6112 ELFSpec) &&
6113 (Inst.getOpcode() == AArch64::ADDXri ||
6114 Inst.getOpcode() == AArch64::ADDWri))
6115 return false;
6116
6117 // Don't allow symbol refs in the immediate field otherwise
6118 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6119 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6120 // 'cmp w0, 'borked')
6121 return Error(Loc.back(), "invalid immediate expression");
6122 }
6123 // We don't validate more complex expressions here
6124 }
6125 return false;
6126 }
6127 default:
6128 return false;
6129 }
6130}
6131
6133 const FeatureBitset &FBS,
6134 unsigned VariantID = 0);
6135
6136bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6138 OperandVector &Operands) {
6139 switch (ErrCode) {
6140 case Match_InvalidTiedOperand: {
6141 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6142 if (Op.isVectorList())
6143 return Error(Loc, "operand must match destination register list");
6144
6145 assert(Op.isReg() && "Unexpected operand type");
6146 switch (Op.getRegEqualityTy()) {
6147 case RegConstraintEqualityTy::EqualsSubReg:
6148 return Error(Loc, "operand must be 64-bit form of destination register");
6149 case RegConstraintEqualityTy::EqualsSuperReg:
6150 return Error(Loc, "operand must be 32-bit form of destination register");
6151 case RegConstraintEqualityTy::EqualsReg:
6152 return Error(Loc, "operand must match destination register");
6153 }
6154 llvm_unreachable("Unknown RegConstraintEqualityTy");
6155 }
6156 case Match_MissingFeature:
6157 return Error(Loc,
6158 "instruction requires a CPU feature not currently enabled");
6159 case Match_InvalidOperand:
6160 return Error(Loc, "invalid operand for instruction");
6161 case Match_InvalidSuffix:
6162 return Error(Loc, "invalid type suffix for instruction");
6163 case Match_InvalidCondCode:
6164 return Error(Loc, "expected AArch64 condition code");
6165 case Match_AddSubRegExtendSmall:
6166 return Error(Loc,
6167 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6168 case Match_AddSubRegExtendLarge:
6169 return Error(Loc,
6170 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6171 case Match_AddSubSecondSource:
6172 return Error(Loc,
6173 "expected compatible register, symbol or integer in range [0, 4095]");
6174 case Match_LogicalSecondSource:
6175 return Error(Loc, "expected compatible register or logical immediate");
6176 case Match_InvalidMovImm32Shift:
6177 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6178 case Match_InvalidMovImm64Shift:
6179 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6180 case Match_AddSubRegShift32:
6181 return Error(Loc,
6182 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6183 case Match_AddSubRegShift64:
6184 return Error(Loc,
6185 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6186 case Match_InvalidFPImm:
6187 return Error(Loc,
6188 "expected compatible register or floating-point constant");
6189 case Match_InvalidMemoryIndexedSImm6:
6190 return Error(Loc, "index must be an integer in range [-32, 31].");
6191 case Match_InvalidMemoryIndexedSImm5:
6192 return Error(Loc, "index must be an integer in range [-16, 15].");
6193 case Match_InvalidMemoryIndexed1SImm4:
6194 return Error(Loc, "index must be an integer in range [-8, 7].");
6195 case Match_InvalidMemoryIndexed2SImm4:
6196 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6197 case Match_InvalidMemoryIndexed3SImm4:
6198 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6199 case Match_InvalidMemoryIndexed4SImm4:
6200 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6201 case Match_InvalidMemoryIndexed16SImm4:
6202 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6203 case Match_InvalidMemoryIndexed32SImm4:
6204 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6205 case Match_InvalidMemoryIndexed1SImm6:
6206 return Error(Loc, "index must be an integer in range [-32, 31].");
6207 case Match_InvalidMemoryIndexedSImm8:
6208 return Error(Loc, "index must be an integer in range [-128, 127].");
6209 case Match_InvalidMemoryIndexedSImm9:
6210 return Error(Loc, "index must be an integer in range [-256, 255].");
6211 case Match_InvalidMemoryIndexed16SImm9:
6212 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6213 case Match_InvalidMemoryIndexed8SImm10:
6214 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6215 case Match_InvalidMemoryIndexed4SImm7:
6216 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6217 case Match_InvalidMemoryIndexed8SImm7:
6218 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6219 case Match_InvalidMemoryIndexed16SImm7:
6220 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6221 case Match_InvalidMemoryIndexed8UImm5:
6222 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6223 case Match_InvalidMemoryIndexed8UImm3:
6224 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6225 case Match_InvalidMemoryIndexed4UImm5:
6226 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6227 case Match_InvalidMemoryIndexed2UImm5:
6228 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6229 case Match_InvalidMemoryIndexed8UImm6:
6230 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6231 case Match_InvalidMemoryIndexed16UImm6:
6232 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6233 case Match_InvalidMemoryIndexed4UImm6:
6234 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6235 case Match_InvalidMemoryIndexed2UImm6:
6236 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6237 case Match_InvalidMemoryIndexed1UImm6:
6238 return Error(Loc, "index must be in range [0, 63].");
6239 case Match_InvalidMemoryWExtend8:
6240 return Error(Loc,
6241 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6242 case Match_InvalidMemoryWExtend16:
6243 return Error(Loc,
6244 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6245 case Match_InvalidMemoryWExtend32:
6246 return Error(Loc,
6247 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6248 case Match_InvalidMemoryWExtend64:
6249 return Error(Loc,
6250 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6251 case Match_InvalidMemoryWExtend128:
6252 return Error(Loc,
6253 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6254 case Match_InvalidMemoryXExtend8:
6255 return Error(Loc,
6256 "expected 'lsl' or 'sxtx' with optional shift of #0");
6257 case Match_InvalidMemoryXExtend16:
6258 return Error(Loc,
6259 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6260 case Match_InvalidMemoryXExtend32:
6261 return Error(Loc,
6262 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6263 case Match_InvalidMemoryXExtend64:
6264 return Error(Loc,
6265 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6266 case Match_InvalidMemoryXExtend128:
6267 return Error(Loc,
6268 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6269 case Match_InvalidMemoryIndexed1:
6270 return Error(Loc, "index must be an integer in range [0, 4095].");
6271 case Match_InvalidMemoryIndexed2:
6272 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6273 case Match_InvalidMemoryIndexed4:
6274 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6275 case Match_InvalidMemoryIndexed8:
6276 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6277 case Match_InvalidMemoryIndexed16:
6278 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6279 case Match_InvalidImm0_0:
6280 return Error(Loc, "immediate must be 0.");
6281 case Match_InvalidImm0_1:
6282 return Error(Loc, "immediate must be an integer in range [0, 1].");
6283 case Match_InvalidImm0_3:
6284 return Error(Loc, "immediate must be an integer in range [0, 3].");
6285 case Match_InvalidImm0_7:
6286 return Error(Loc, "immediate must be an integer in range [0, 7].");
6287 case Match_InvalidImm0_15:
6288 return Error(Loc, "immediate must be an integer in range [0, 15].");
6289 case Match_InvalidImm0_31:
6290 return Error(Loc, "immediate must be an integer in range [0, 31].");
6291 case Match_InvalidImm0_63:
6292 return Error(Loc, "immediate must be an integer in range [0, 63].");
6293 case Match_InvalidImm0_127:
6294 return Error(Loc, "immediate must be an integer in range [0, 127].");
6295 case Match_InvalidImm0_255:
6296 return Error(Loc, "immediate must be an integer in range [0, 255].");
6297 case Match_InvalidImm0_65535:
6298 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6299 case Match_InvalidImm1_8:
6300 return Error(Loc, "immediate must be an integer in range [1, 8].");
6301 case Match_InvalidImm1_16:
6302 return Error(Loc, "immediate must be an integer in range [1, 16].");
6303 case Match_InvalidImm1_32:
6304 return Error(Loc, "immediate must be an integer in range [1, 32].");
6305 case Match_InvalidImm1_64:
6306 return Error(Loc, "immediate must be an integer in range [1, 64].");
6307 case Match_InvalidImmM1_62:
6308 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6309 case Match_InvalidMemoryIndexedRange2UImm0:
6310 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6311 case Match_InvalidMemoryIndexedRange2UImm1:
6312 return Error(Loc, "vector select offset must be an immediate range of the "
6313 "form <immf>:<imml>, where the first "
6314 "immediate is a multiple of 2 in the range [0, 2], and "
6315 "the second immediate is immf + 1.");
6316 case Match_InvalidMemoryIndexedRange2UImm2:
6317 case Match_InvalidMemoryIndexedRange2UImm3:
6318 return Error(
6319 Loc,
6320 "vector select offset must be an immediate range of the form "
6321 "<immf>:<imml>, "
6322 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6323 "[0, 14] "
6324 "depending on the instruction, and the second immediate is immf + 1.");
6325 case Match_InvalidMemoryIndexedRange4UImm0:
6326 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6327 case Match_InvalidMemoryIndexedRange4UImm1:
6328 case Match_InvalidMemoryIndexedRange4UImm2:
6329 return Error(
6330 Loc,
6331 "vector select offset must be an immediate range of the form "
6332 "<immf>:<imml>, "
6333 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6334 "[0, 12] "
6335 "depending on the instruction, and the second immediate is immf + 3.");
6336 case Match_InvalidSVEAddSubImm8:
6337 return Error(Loc, "immediate must be an integer in range [0, 255]"
6338 " with a shift amount of 0");
6339 case Match_InvalidSVEAddSubImm16:
6340 case Match_InvalidSVEAddSubImm32:
6341 case Match_InvalidSVEAddSubImm64:
6342 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6343 "multiple of 256 in range [256, 65280]");
6344 case Match_InvalidSVECpyImm8:
6345 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6346 " with a shift amount of 0");
6347 case Match_InvalidSVECpyImm16:
6348 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6349 "multiple of 256 in range [-32768, 65280]");
6350 case Match_InvalidSVECpyImm32:
6351 case Match_InvalidSVECpyImm64:
6352 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6353 "multiple of 256 in range [-32768, 32512]");
6354 case Match_InvalidIndexRange0_0:
6355 return Error(Loc, "expected lane specifier '[0]'");
6356 case Match_InvalidIndexRange1_1:
6357 return Error(Loc, "expected lane specifier '[1]'");
6358 case Match_InvalidIndexRange0_15:
6359 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6360 case Match_InvalidIndexRange0_7:
6361 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6362 case Match_InvalidIndexRange0_3:
6363 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6364 case Match_InvalidIndexRange0_1:
6365 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6366 case Match_InvalidSVEIndexRange0_63:
6367 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6368 case Match_InvalidSVEIndexRange0_31:
6369 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6370 case Match_InvalidSVEIndexRange0_15:
6371 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6372 case Match_InvalidSVEIndexRange0_7:
6373 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6374 case Match_InvalidSVEIndexRange0_3:
6375 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6376 case Match_InvalidLabel:
6377 return Error(Loc, "expected label or encodable integer pc offset");
6378 case Match_MRS:
6379 return Error(Loc, "expected readable system register");
6380 case Match_MSR:
6381 case Match_InvalidSVCR:
6382 return Error(Loc, "expected writable system register or pstate");
6383 case Match_InvalidComplexRotationEven:
6384 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6385 case Match_InvalidComplexRotationOdd:
6386 return Error(Loc, "complex rotation must be 90 or 270.");
6387 case Match_MnemonicFail: {
6388 std::string Suggestion = AArch64MnemonicSpellCheck(
6389 ((AArch64Operand &)*Operands[0]).getToken(),
6390 ComputeAvailableFeatures(STI->getFeatureBits()));
6391 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6392 }
6393 case Match_InvalidGPR64shifted8:
6394 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6395 case Match_InvalidGPR64shifted16:
6396 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6397 case Match_InvalidGPR64shifted32:
6398 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6399 case Match_InvalidGPR64shifted64:
6400 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6401 case Match_InvalidGPR64shifted128:
6402 return Error(
6403 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6404 case Match_InvalidGPR64NoXZRshifted8:
6405 return Error(Loc, "register must be x0..x30 without shift");
6406 case Match_InvalidGPR64NoXZRshifted16:
6407 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6408 case Match_InvalidGPR64NoXZRshifted32:
6409 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6410 case Match_InvalidGPR64NoXZRshifted64:
6411 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6412 case Match_InvalidGPR64NoXZRshifted128:
6413 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6414 case Match_InvalidZPR32UXTW8:
6415 case Match_InvalidZPR32SXTW8:
6416 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6417 case Match_InvalidZPR32UXTW16:
6418 case Match_InvalidZPR32SXTW16:
6419 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6420 case Match_InvalidZPR32UXTW32:
6421 case Match_InvalidZPR32SXTW32:
6422 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6423 case Match_InvalidZPR32UXTW64:
6424 case Match_InvalidZPR32SXTW64:
6425 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6426 case Match_InvalidZPR64UXTW8:
6427 case Match_InvalidZPR64SXTW8:
6428 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6429 case Match_InvalidZPR64UXTW16:
6430 case Match_InvalidZPR64SXTW16:
6431 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6432 case Match_InvalidZPR64UXTW32:
6433 case Match_InvalidZPR64SXTW32:
6434 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6435 case Match_InvalidZPR64UXTW64:
6436 case Match_InvalidZPR64SXTW64:
6437 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6438 case Match_InvalidZPR32LSL8:
6439 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6440 case Match_InvalidZPR32LSL16:
6441 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6442 case Match_InvalidZPR32LSL32:
6443 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6444 case Match_InvalidZPR32LSL64:
6445 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6446 case Match_InvalidZPR64LSL8:
6447 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6448 case Match_InvalidZPR64LSL16:
6449 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6450 case Match_InvalidZPR64LSL32:
6451 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6452 case Match_InvalidZPR64LSL64:
6453 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6454 case Match_InvalidZPR0:
6455 return Error(Loc, "expected register without element width suffix");
6456 case Match_InvalidZPR8:
6457 case Match_InvalidZPR16:
6458 case Match_InvalidZPR32:
6459 case Match_InvalidZPR64:
6460 case Match_InvalidZPR128:
6461 return Error(Loc, "invalid element width");
6462 case Match_InvalidZPR_3b8:
6463 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6464 case Match_InvalidZPR_3b16:
6465 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6466 case Match_InvalidZPR_3b32:
6467 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6468 case Match_InvalidZPR_4b8:
6469 return Error(Loc,
6470 "Invalid restricted vector register, expected z0.b..z15.b");
6471 case Match_InvalidZPR_4b16:
6472 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6473 case Match_InvalidZPR_4b32:
6474 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6475 case Match_InvalidZPR_4b64:
6476 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6477 case Match_InvalidZPRMul2_Lo8:
6478 return Error(Loc, "Invalid restricted vector register, expected even "
6479 "register in z0.b..z14.b");
6480 case Match_InvalidZPRMul2_Hi8:
6481 return Error(Loc, "Invalid restricted vector register, expected even "
6482 "register in z16.b..z30.b");
6483 case Match_InvalidZPRMul2_Lo16:
6484 return Error(Loc, "Invalid restricted vector register, expected even "
6485 "register in z0.h..z14.h");
6486 case Match_InvalidZPRMul2_Hi16:
6487 return Error(Loc, "Invalid restricted vector register, expected even "
6488 "register in z16.h..z30.h");
6489 case Match_InvalidZPRMul2_Lo32:
6490 return Error(Loc, "Invalid restricted vector register, expected even "
6491 "register in z0.s..z14.s");
6492 case Match_InvalidZPRMul2_Hi32:
6493 return Error(Loc, "Invalid restricted vector register, expected even "
6494 "register in z16.s..z30.s");
6495 case Match_InvalidZPRMul2_Lo64:
6496 return Error(Loc, "Invalid restricted vector register, expected even "
6497 "register in z0.d..z14.d");
6498 case Match_InvalidZPRMul2_Hi64:
6499 return Error(Loc, "Invalid restricted vector register, expected even "
6500 "register in z16.d..z30.d");
6501 case Match_InvalidZPR_K0:
6502 return Error(Loc, "invalid restricted vector register, expected register "
6503 "in z20..z23 or z28..z31");
6504 case Match_InvalidSVEPattern:
6505 return Error(Loc, "invalid predicate pattern");
6506 case Match_InvalidSVEPPRorPNRAnyReg:
6507 case Match_InvalidSVEPPRorPNRBReg:
6508 case Match_InvalidSVEPredicateAnyReg:
6509 case Match_InvalidSVEPredicateBReg:
6510 case Match_InvalidSVEPredicateHReg:
6511 case Match_InvalidSVEPredicateSReg:
6512 case Match_InvalidSVEPredicateDReg:
6513 return Error(Loc, "invalid predicate register.");
6514 case Match_InvalidSVEPredicate3bAnyReg:
6515 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6516 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6517 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6518 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6519 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6520 return Error(Loc, "Invalid predicate register, expected PN in range "
6521 "pn8..pn15 with element suffix.");
6522 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6523 return Error(Loc, "invalid restricted predicate-as-counter register "
6524 "expected pn8..pn15");
6525 case Match_InvalidSVEPNPredicateBReg:
6526 case Match_InvalidSVEPNPredicateHReg:
6527 case Match_InvalidSVEPNPredicateSReg:
6528 case Match_InvalidSVEPNPredicateDReg:
6529 return Error(Loc, "Invalid predicate register, expected PN in range "
6530 "pn0..pn15 with element suffix.");
6531 case Match_InvalidSVEVecLenSpecifier:
6532 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6533 case Match_InvalidSVEPredicateListMul2x8:
6534 case Match_InvalidSVEPredicateListMul2x16:
6535 case Match_InvalidSVEPredicateListMul2x32:
6536 case Match_InvalidSVEPredicateListMul2x64:
6537 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6538 "predicate registers, where the first vector is a multiple of 2 "
6539 "and with correct element type");
6540 case Match_InvalidSVEExactFPImmOperandHalfOne:
6541 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6542 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6543 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6544 case Match_InvalidSVEExactFPImmOperandZeroOne:
6545 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6546 case Match_InvalidMatrixTileVectorH8:
6547 case Match_InvalidMatrixTileVectorV8:
6548 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6549 case Match_InvalidMatrixTileVectorH16:
6550 case Match_InvalidMatrixTileVectorV16:
6551 return Error(Loc,
6552 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6553 case Match_InvalidMatrixTileVectorH32:
6554 case Match_InvalidMatrixTileVectorV32:
6555 return Error(Loc,
6556 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6557 case Match_InvalidMatrixTileVectorH64:
6558 case Match_InvalidMatrixTileVectorV64:
6559 return Error(Loc,
6560 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6561 case Match_InvalidMatrixTileVectorH128:
6562 case Match_InvalidMatrixTileVectorV128:
6563 return Error(Loc,
6564 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6565 case Match_InvalidMatrixTile16:
6566 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6567 case Match_InvalidMatrixTile32:
6568 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6569 case Match_InvalidMatrixTile64:
6570 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6571 case Match_InvalidMatrix:
6572 return Error(Loc, "invalid matrix operand, expected za");
6573 case Match_InvalidMatrix8:
6574 return Error(Loc, "invalid matrix operand, expected suffix .b");
6575 case Match_InvalidMatrix16:
6576 return Error(Loc, "invalid matrix operand, expected suffix .h");
6577 case Match_InvalidMatrix32:
6578 return Error(Loc, "invalid matrix operand, expected suffix .s");
6579 case Match_InvalidMatrix64:
6580 return Error(Loc, "invalid matrix operand, expected suffix .d");
6581 case Match_InvalidMatrixIndexGPR32_12_15:
6582 return Error(Loc, "operand must be a register in range [w12, w15]");
6583 case Match_InvalidMatrixIndexGPR32_8_11:
6584 return Error(Loc, "operand must be a register in range [w8, w11]");
6585 case Match_InvalidSVEVectorList2x8Mul2:
6586 case Match_InvalidSVEVectorList2x16Mul2:
6587 case Match_InvalidSVEVectorList2x32Mul2:
6588 case Match_InvalidSVEVectorList2x64Mul2:
6589 case Match_InvalidSVEVectorList2x128Mul2:
6590 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6591 "SVE vectors, where the first vector is a multiple of 2 "
6592 "and with matching element types");
6593 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6594 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6595 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6596 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6597 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6598 "SVE vectors in the range z0-z14, where the first vector "
6599 "is a multiple of 2 "
6600 "and with matching element types");
6601 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6602 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6603 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6604 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6605 return Error(Loc,
6606 "Invalid vector list, expected list with 2 consecutive "
6607 "SVE vectors in the range z16-z30, where the first vector "
6608 "is a multiple of 2 "
6609 "and with matching element types");
6610 case Match_InvalidSVEVectorList4x8Mul4:
6611 case Match_InvalidSVEVectorList4x16Mul4:
6612 case Match_InvalidSVEVectorList4x32Mul4:
6613 case Match_InvalidSVEVectorList4x64Mul4:
6614 case Match_InvalidSVEVectorList4x128Mul4:
6615 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6616 "SVE vectors, where the first vector is a multiple of 4 "
6617 "and with matching element types");
6618 case Match_InvalidLookupTable:
6619 return Error(Loc, "Invalid lookup table, expected zt0");
6620 case Match_InvalidSVEVectorListStrided2x8:
6621 case Match_InvalidSVEVectorListStrided2x16:
6622 case Match_InvalidSVEVectorListStrided2x32:
6623 case Match_InvalidSVEVectorListStrided2x64:
6624 return Error(
6625 Loc,
6626 "Invalid vector list, expected list with each SVE vector in the list "
6627 "8 registers apart, and the first register in the range [z0, z7] or "
6628 "[z16, z23] and with correct element type");
6629 case Match_InvalidSVEVectorListStrided4x8:
6630 case Match_InvalidSVEVectorListStrided4x16:
6631 case Match_InvalidSVEVectorListStrided4x32:
6632 case Match_InvalidSVEVectorListStrided4x64:
6633 return Error(
6634 Loc,
6635 "Invalid vector list, expected list with each SVE vector in the list "
6636 "4 registers apart, and the first register in the range [z0, z3] or "
6637 "[z16, z19] and with correct element type");
6638 case Match_AddSubLSLImm3ShiftLarge:
6639 return Error(Loc,
6640 "expected 'lsl' with optional integer in range [0, 7]");
6641 default:
6642 llvm_unreachable("unexpected error code!");
6643 }
6644}
6645
6646static const char *getSubtargetFeatureName(uint64_t Val);
6647
6648bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6649 OperandVector &Operands,
6650 MCStreamer &Out,
6652 bool MatchingInlineAsm) {
6653 assert(!Operands.empty() && "Unexpected empty operand list!");
6654 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6655 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6656
6657 StringRef Tok = Op.getToken();
6658 unsigned NumOperands = Operands.size();
6659
6660 if (NumOperands == 4 && Tok == "lsl") {
6661 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6662 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6663 if (Op2.isScalarReg() && Op3.isImm()) {
6664 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6665 if (Op3CE) {
6666 uint64_t Op3Val = Op3CE->getValue();
6667 uint64_t NewOp3Val = 0;
6668 uint64_t NewOp4Val = 0;
6669 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6670 Op2.getReg())) {
6671 NewOp3Val = (32 - Op3Val) & 0x1f;
6672 NewOp4Val = 31 - Op3Val;
6673 } else {
6674 NewOp3Val = (64 - Op3Val) & 0x3f;
6675 NewOp4Val = 63 - Op3Val;
6676 }
6677
6678 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6679 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6680
6681 Operands[0] =
6682 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6683 Operands.push_back(AArch64Operand::CreateImm(
6684 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6685 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6686 Op3.getEndLoc(), getContext());
6687 }
6688 }
6689 } else if (NumOperands == 4 && Tok == "bfc") {
6690 // FIXME: Horrible hack to handle BFC->BFM alias.
6691 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6692 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6693 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6694
6695 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6696 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6697 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6698
6699 if (LSBCE && WidthCE) {
6700 uint64_t LSB = LSBCE->getValue();
6701 uint64_t Width = WidthCE->getValue();
6702
6703 uint64_t RegWidth = 0;
6704 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6705 Op1.getReg()))
6706 RegWidth = 64;
6707 else
6708 RegWidth = 32;
6709
6710 if (LSB >= RegWidth)
6711 return Error(LSBOp.getStartLoc(),
6712 "expected integer in range [0, 31]");
6713 if (Width < 1 || Width > RegWidth)
6714 return Error(WidthOp.getStartLoc(),
6715 "expected integer in range [1, 32]");
6716
6717 uint64_t ImmR = 0;
6718 if (RegWidth == 32)
6719 ImmR = (32 - LSB) & 0x1f;
6720 else
6721 ImmR = (64 - LSB) & 0x3f;
6722
6723 uint64_t ImmS = Width - 1;
6724
6725 if (ImmR != 0 && ImmS >= ImmR)
6726 return Error(WidthOp.getStartLoc(),
6727 "requested insert overflows register");
6728
6729 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6730 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6731 Operands[0] =
6732 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6733 Operands[2] = AArch64Operand::CreateReg(
6734 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6735 SMLoc(), SMLoc(), getContext());
6736 Operands[3] = AArch64Operand::CreateImm(
6737 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6738 Operands.emplace_back(
6739 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6740 WidthOp.getEndLoc(), getContext()));
6741 }
6742 }
6743 } else if (NumOperands == 5) {
6744 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6745 // UBFIZ -> UBFM aliases.
6746 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6747 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6748 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6749 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6750
6751 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6752 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6753 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6754
6755 if (Op3CE && Op4CE) {
6756 uint64_t Op3Val = Op3CE->getValue();
6757 uint64_t Op4Val = Op4CE->getValue();
6758
6759 uint64_t RegWidth = 0;
6760 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6761 Op1.getReg()))
6762 RegWidth = 64;
6763 else
6764 RegWidth = 32;
6765
6766 if (Op3Val >= RegWidth)
6767 return Error(Op3.getStartLoc(),
6768 "expected integer in range [0, 31]");
6769 if (Op4Val < 1 || Op4Val > RegWidth)
6770 return Error(Op4.getStartLoc(),
6771 "expected integer in range [1, 32]");
6772
6773 uint64_t NewOp3Val = 0;
6774 if (RegWidth == 32)
6775 NewOp3Val = (32 - Op3Val) & 0x1f;
6776 else
6777 NewOp3Val = (64 - Op3Val) & 0x3f;
6778
6779 uint64_t NewOp4Val = Op4Val - 1;
6780
6781 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6782 return Error(Op4.getStartLoc(),
6783 "requested insert overflows register");
6784
6785 const MCExpr *NewOp3 =
6786 MCConstantExpr::create(NewOp3Val, getContext());
6787 const MCExpr *NewOp4 =
6788 MCConstantExpr::create(NewOp4Val, getContext());
6789 Operands[3] = AArch64Operand::CreateImm(
6790 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6791 Operands[4] = AArch64Operand::CreateImm(
6792 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6793 if (Tok == "bfi")
6794 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6795 getContext());
6796 else if (Tok == "sbfiz")
6797 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6798 getContext());
6799 else if (Tok == "ubfiz")
6800 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6801 getContext());
6802 else
6803 llvm_unreachable("No valid mnemonic for alias?");
6804 }
6805 }
6806
6807 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6808 // UBFX -> UBFM aliases.
6809 } else if (NumOperands == 5 &&
6810 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6811 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6812 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6813 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6814
6815 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6816 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6817 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6818
6819 if (Op3CE && Op4CE) {
6820 uint64_t Op3Val = Op3CE->getValue();
6821 uint64_t Op4Val = Op4CE->getValue();
6822
6823 uint64_t RegWidth = 0;
6824 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6825 Op1.getReg()))
6826 RegWidth = 64;
6827 else
6828 RegWidth = 32;
6829
6830 if (Op3Val >= RegWidth)
6831 return Error(Op3.getStartLoc(),
6832 "expected integer in range [0, 31]");
6833 if (Op4Val < 1 || Op4Val > RegWidth)
6834 return Error(Op4.getStartLoc(),
6835 "expected integer in range [1, 32]");
6836
6837 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6838
6839 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6840 return Error(Op4.getStartLoc(),
6841 "requested extract overflows register");
6842
6843 const MCExpr *NewOp4 =
6844 MCConstantExpr::create(NewOp4Val, getContext());
6845 Operands[4] = AArch64Operand::CreateImm(
6846 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6847 if (Tok == "bfxil")
6848 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6849 getContext());
6850 else if (Tok == "sbfx")
6851 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6852 getContext());
6853 else if (Tok == "ubfx")
6854 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6855 getContext());
6856 else
6857 llvm_unreachable("No valid mnemonic for alias?");
6858 }
6859 }
6860 }
6861 }
6862
6863 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6864 // instruction for FP registers correctly in some rare circumstances. Convert
6865 // it to a safe instruction and warn (because silently changing someone's
6866 // assembly is rude).
6867 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6868 NumOperands == 4 && Tok == "movi") {
6869 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6870 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6871 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6872 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6873 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6874 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6875 if (Suffix.lower() == ".2d" &&
6876 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6877 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6878 " correctly on this CPU, converting to equivalent movi.16b");
6879 // Switch the suffix to .16b.
6880 unsigned Idx = Op1.isToken() ? 1 : 2;
6881 Operands[Idx] =
6882 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6883 }
6884 }
6885 }
6886
6887 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6888 // InstAlias can't quite handle this since the reg classes aren't
6889 // subclasses.
6890 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6891 // The source register can be Wn here, but the matcher expects a
6892 // GPR64. Twiddle it here if necessary.
6893 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6894 if (Op.isScalarReg()) {
6895 MCRegister Reg = getXRegFromWReg(Op.getReg());
6896 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6897 Op.getStartLoc(), Op.getEndLoc(),
6898 getContext());
6899 }
6900 }
6901 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6902 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6903 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6904 if (Op.isScalarReg() &&
6905 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6906 Op.getReg())) {
6907 // The source register can be Wn here, but the matcher expects a
6908 // GPR64. Twiddle it here if necessary.
6909 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6910 if (Op.isScalarReg()) {
6911 MCRegister Reg = getXRegFromWReg(Op.getReg());
6912 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6913 Op.getStartLoc(),
6914 Op.getEndLoc(), getContext());
6915 }
6916 }
6917 }
6918 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6919 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6920 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6921 if (Op.isScalarReg() &&
6922 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6923 Op.getReg())) {
6924 // The source register can be Wn here, but the matcher expects a
6925 // GPR32. Twiddle it here if necessary.
6926 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6927 if (Op.isScalarReg()) {
6928 MCRegister Reg = getWRegFromXReg(Op.getReg());
6929 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6930 Op.getStartLoc(),
6931 Op.getEndLoc(), getContext());
6932 }
6933 }
6934 }
6935
6936 MCInst Inst;
6937 FeatureBitset MissingFeatures;
6938 // First try to match against the secondary set of tables containing the
6939 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6940 unsigned MatchResult =
6941 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6942 MatchingInlineAsm, 1);
6943
6944 // If that fails, try against the alternate table containing long-form NEON:
6945 // "fadd v0.2s, v1.2s, v2.2s"
6946 if (MatchResult != Match_Success) {
6947 // But first, save the short-form match result: we can use it in case the
6948 // long-form match also fails.
6949 auto ShortFormNEONErrorInfo = ErrorInfo;
6950 auto ShortFormNEONMatchResult = MatchResult;
6951 auto ShortFormNEONMissingFeatures = MissingFeatures;
6952
6953 MatchResult =
6954 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6955 MatchingInlineAsm, 0);
6956
6957 // Now, both matches failed, and the long-form match failed on the mnemonic
6958 // suffix token operand. The short-form match failure is probably more
6959 // relevant: use it instead.
6960 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6961 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6962 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6963 MatchResult = ShortFormNEONMatchResult;
6964 ErrorInfo = ShortFormNEONErrorInfo;
6965 MissingFeatures = ShortFormNEONMissingFeatures;
6966 }
6967 }
6968
6969 switch (MatchResult) {
6970 case Match_Success: {
6971 // Perform range checking and other semantic validations
6972 SmallVector<SMLoc, 8> OperandLocs;
6973 NumOperands = Operands.size();
6974 for (unsigned i = 1; i < NumOperands; ++i)
6975 OperandLocs.push_back(Operands[i]->getStartLoc());
6976 if (validateInstruction(Inst, IDLoc, OperandLocs))
6977 return true;
6978
6979 Inst.setLoc(IDLoc);
6980 Out.emitInstruction(Inst, getSTI());
6981 return false;
6982 }
6983 case Match_MissingFeature: {
6984 assert(MissingFeatures.any() && "Unknown missing feature!");
6985 // Special case the error message for the very common case where only
6986 // a single subtarget feature is missing (neon, e.g.).
6987 std::string Msg = "instruction requires:";
6988 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6989 if (MissingFeatures[i]) {
6990 Msg += " ";
6991 Msg += getSubtargetFeatureName(i);
6992 }
6993 }
6994 return Error(IDLoc, Msg);
6995 }
6996 case Match_MnemonicFail:
6997 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6998 case Match_InvalidOperand: {
6999 SMLoc ErrorLoc = IDLoc;
7000
7001 if (ErrorInfo != ~0ULL) {
7002 if (ErrorInfo >= Operands.size())
7003 return Error(IDLoc, "too few operands for instruction",
7004 SMRange(IDLoc, getTok().getLoc()));
7005
7006 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7007 if (ErrorLoc == SMLoc())
7008 ErrorLoc = IDLoc;
7009 }
7010 // If the match failed on a suffix token operand, tweak the diagnostic
7011 // accordingly.
7012 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7013 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7014 MatchResult = Match_InvalidSuffix;
7015
7016 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7017 }
7018 case Match_InvalidTiedOperand:
7019 case Match_InvalidMemoryIndexed1:
7020 case Match_InvalidMemoryIndexed2:
7021 case Match_InvalidMemoryIndexed4:
7022 case Match_InvalidMemoryIndexed8:
7023 case Match_InvalidMemoryIndexed16:
7024 case Match_InvalidCondCode:
7025 case Match_AddSubLSLImm3ShiftLarge:
7026 case Match_AddSubRegExtendSmall:
7027 case Match_AddSubRegExtendLarge:
7028 case Match_AddSubSecondSource:
7029 case Match_LogicalSecondSource:
7030 case Match_AddSubRegShift32:
7031 case Match_AddSubRegShift64:
7032 case Match_InvalidMovImm32Shift:
7033 case Match_InvalidMovImm64Shift:
7034 case Match_InvalidFPImm:
7035 case Match_InvalidMemoryWExtend8:
7036 case Match_InvalidMemoryWExtend16:
7037 case Match_InvalidMemoryWExtend32:
7038 case Match_InvalidMemoryWExtend64:
7039 case Match_InvalidMemoryWExtend128:
7040 case Match_InvalidMemoryXExtend8:
7041 case Match_InvalidMemoryXExtend16:
7042 case Match_InvalidMemoryXExtend32:
7043 case Match_InvalidMemoryXExtend64:
7044 case Match_InvalidMemoryXExtend128:
7045 case Match_InvalidMemoryIndexed1SImm4:
7046 case Match_InvalidMemoryIndexed2SImm4:
7047 case Match_InvalidMemoryIndexed3SImm4:
7048 case Match_InvalidMemoryIndexed4SImm4:
7049 case Match_InvalidMemoryIndexed1SImm6:
7050 case Match_InvalidMemoryIndexed16SImm4:
7051 case Match_InvalidMemoryIndexed32SImm4:
7052 case Match_InvalidMemoryIndexed4SImm7:
7053 case Match_InvalidMemoryIndexed8SImm7:
7054 case Match_InvalidMemoryIndexed16SImm7:
7055 case Match_InvalidMemoryIndexed8UImm5:
7056 case Match_InvalidMemoryIndexed8UImm3:
7057 case Match_InvalidMemoryIndexed4UImm5:
7058 case Match_InvalidMemoryIndexed2UImm5:
7059 case Match_InvalidMemoryIndexed1UImm6:
7060 case Match_InvalidMemoryIndexed2UImm6:
7061 case Match_InvalidMemoryIndexed4UImm6:
7062 case Match_InvalidMemoryIndexed8UImm6:
7063 case Match_InvalidMemoryIndexed16UImm6:
7064 case Match_InvalidMemoryIndexedSImm6:
7065 case Match_InvalidMemoryIndexedSImm5:
7066 case Match_InvalidMemoryIndexedSImm8:
7067 case Match_InvalidMemoryIndexedSImm9:
7068 case Match_InvalidMemoryIndexed16SImm9:
7069 case Match_InvalidMemoryIndexed8SImm10:
7070 case Match_InvalidImm0_0:
7071 case Match_InvalidImm0_1:
7072 case Match_InvalidImm0_3:
7073 case Match_InvalidImm0_7:
7074 case Match_InvalidImm0_15:
7075 case Match_InvalidImm0_31:
7076 case Match_InvalidImm0_63:
7077 case Match_InvalidImm0_127:
7078 case Match_InvalidImm0_255:
7079 case Match_InvalidImm0_65535:
7080 case Match_InvalidImm1_8:
7081 case Match_InvalidImm1_16:
7082 case Match_InvalidImm1_32:
7083 case Match_InvalidImm1_64:
7084 case Match_InvalidImmM1_62:
7085 case Match_InvalidMemoryIndexedRange2UImm0:
7086 case Match_InvalidMemoryIndexedRange2UImm1:
7087 case Match_InvalidMemoryIndexedRange2UImm2:
7088 case Match_InvalidMemoryIndexedRange2UImm3:
7089 case Match_InvalidMemoryIndexedRange4UImm0:
7090 case Match_InvalidMemoryIndexedRange4UImm1:
7091 case Match_InvalidMemoryIndexedRange4UImm2:
7092 case Match_InvalidSVEAddSubImm8:
7093 case Match_InvalidSVEAddSubImm16:
7094 case Match_InvalidSVEAddSubImm32:
7095 case Match_InvalidSVEAddSubImm64:
7096 case Match_InvalidSVECpyImm8:
7097 case Match_InvalidSVECpyImm16:
7098 case Match_InvalidSVECpyImm32:
7099 case Match_InvalidSVECpyImm64:
7100 case Match_InvalidIndexRange0_0:
7101 case Match_InvalidIndexRange1_1:
7102 case Match_InvalidIndexRange0_15:
7103 case Match_InvalidIndexRange0_7:
7104 case Match_InvalidIndexRange0_3:
7105 case Match_InvalidIndexRange0_1:
7106 case Match_InvalidSVEIndexRange0_63:
7107 case Match_InvalidSVEIndexRange0_31:
7108 case Match_InvalidSVEIndexRange0_15:
7109 case Match_InvalidSVEIndexRange0_7:
7110 case Match_InvalidSVEIndexRange0_3:
7111 case Match_InvalidLabel:
7112 case Match_InvalidComplexRotationEven:
7113 case Match_InvalidComplexRotationOdd:
7114 case Match_InvalidGPR64shifted8:
7115 case Match_InvalidGPR64shifted16:
7116 case Match_InvalidGPR64shifted32:
7117 case Match_InvalidGPR64shifted64:
7118 case Match_InvalidGPR64shifted128:
7119 case Match_InvalidGPR64NoXZRshifted8:
7120 case Match_InvalidGPR64NoXZRshifted16:
7121 case Match_InvalidGPR64NoXZRshifted32:
7122 case Match_InvalidGPR64NoXZRshifted64:
7123 case Match_InvalidGPR64NoXZRshifted128:
7124 case Match_InvalidZPR32UXTW8:
7125 case Match_InvalidZPR32UXTW16:
7126 case Match_InvalidZPR32UXTW32:
7127 case Match_InvalidZPR32UXTW64:
7128 case Match_InvalidZPR32SXTW8:
7129 case Match_InvalidZPR32SXTW16:
7130 case Match_InvalidZPR32SXTW32:
7131 case Match_InvalidZPR32SXTW64:
7132 case Match_InvalidZPR64UXTW8:
7133 case Match_InvalidZPR64SXTW8:
7134 case Match_InvalidZPR64UXTW16:
7135 case Match_InvalidZPR64SXTW16:
7136 case Match_InvalidZPR64UXTW32:
7137 case Match_InvalidZPR64SXTW32:
7138 case Match_InvalidZPR64UXTW64:
7139 case Match_InvalidZPR64SXTW64:
7140 case Match_InvalidZPR32LSL8:
7141 case Match_InvalidZPR32LSL16:
7142 case Match_InvalidZPR32LSL32:
7143 case Match_InvalidZPR32LSL64:
7144 case Match_InvalidZPR64LSL8:
7145 case Match_InvalidZPR64LSL16:
7146 case Match_InvalidZPR64LSL32:
7147 case Match_InvalidZPR64LSL64:
7148 case Match_InvalidZPR0:
7149 case Match_InvalidZPR8:
7150 case Match_InvalidZPR16:
7151 case Match_InvalidZPR32:
7152 case Match_InvalidZPR64:
7153 case Match_InvalidZPR128:
7154 case Match_InvalidZPR_3b8:
7155 case Match_InvalidZPR_3b16:
7156 case Match_InvalidZPR_3b32:
7157 case Match_InvalidZPR_4b8:
7158 case Match_InvalidZPR_4b16:
7159 case Match_InvalidZPR_4b32:
7160 case Match_InvalidZPR_4b64:
7161 case Match_InvalidSVEPPRorPNRAnyReg:
7162 case Match_InvalidSVEPPRorPNRBReg:
7163 case Match_InvalidSVEPredicateAnyReg:
7164 case Match_InvalidSVEPattern:
7165 case Match_InvalidSVEVecLenSpecifier:
7166 case Match_InvalidSVEPredicateBReg:
7167 case Match_InvalidSVEPredicateHReg:
7168 case Match_InvalidSVEPredicateSReg:
7169 case Match_InvalidSVEPredicateDReg:
7170 case Match_InvalidSVEPredicate3bAnyReg:
7171 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7172 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7173 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7174 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7175 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7176 case Match_InvalidSVEPNPredicateBReg:
7177 case Match_InvalidSVEPNPredicateHReg:
7178 case Match_InvalidSVEPNPredicateSReg:
7179 case Match_InvalidSVEPNPredicateDReg:
7180 case Match_InvalidSVEPredicateListMul2x8:
7181 case Match_InvalidSVEPredicateListMul2x16:
7182 case Match_InvalidSVEPredicateListMul2x32:
7183 case Match_InvalidSVEPredicateListMul2x64:
7184 case Match_InvalidSVEExactFPImmOperandHalfOne:
7185 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7186 case Match_InvalidSVEExactFPImmOperandZeroOne:
7187 case Match_InvalidMatrixTile16:
7188 case Match_InvalidMatrixTile32:
7189 case Match_InvalidMatrixTile64:
7190 case Match_InvalidMatrix:
7191 case Match_InvalidMatrix8:
7192 case Match_InvalidMatrix16:
7193 case Match_InvalidMatrix32:
7194 case Match_InvalidMatrix64:
7195 case Match_InvalidMatrixTileVectorH8:
7196 case Match_InvalidMatrixTileVectorH16:
7197 case Match_InvalidMatrixTileVectorH32:
7198 case Match_InvalidMatrixTileVectorH64:
7199 case Match_InvalidMatrixTileVectorH128:
7200 case Match_InvalidMatrixTileVectorV8:
7201 case Match_InvalidMatrixTileVectorV16:
7202 case Match_InvalidMatrixTileVectorV32:
7203 case Match_InvalidMatrixTileVectorV64:
7204 case Match_InvalidMatrixTileVectorV128:
7205 case Match_InvalidSVCR:
7206 case Match_InvalidMatrixIndexGPR32_12_15:
7207 case Match_InvalidMatrixIndexGPR32_8_11:
7208 case Match_InvalidLookupTable:
7209 case Match_InvalidZPRMul2_Lo8:
7210 case Match_InvalidZPRMul2_Hi8:
7211 case Match_InvalidZPRMul2_Lo16:
7212 case Match_InvalidZPRMul2_Hi16:
7213 case Match_InvalidZPRMul2_Lo32:
7214 case Match_InvalidZPRMul2_Hi32:
7215 case Match_InvalidZPRMul2_Lo64:
7216 case Match_InvalidZPRMul2_Hi64:
7217 case Match_InvalidZPR_K0:
7218 case Match_InvalidSVEVectorList2x8Mul2:
7219 case Match_InvalidSVEVectorList2x16Mul2:
7220 case Match_InvalidSVEVectorList2x32Mul2:
7221 case Match_InvalidSVEVectorList2x64Mul2:
7222 case Match_InvalidSVEVectorList2x128Mul2:
7223 case Match_InvalidSVEVectorList4x8Mul4:
7224 case Match_InvalidSVEVectorList4x16Mul4:
7225 case Match_InvalidSVEVectorList4x32Mul4:
7226 case Match_InvalidSVEVectorList4x64Mul4:
7227 case Match_InvalidSVEVectorList4x128Mul4:
7228 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7229 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7230 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7231 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7232 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7233 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7234 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7235 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7236 case Match_InvalidSVEVectorListStrided2x8:
7237 case Match_InvalidSVEVectorListStrided2x16:
7238 case Match_InvalidSVEVectorListStrided2x32:
7239 case Match_InvalidSVEVectorListStrided2x64:
7240 case Match_InvalidSVEVectorListStrided4x8:
7241 case Match_InvalidSVEVectorListStrided4x16:
7242 case Match_InvalidSVEVectorListStrided4x32:
7243 case Match_InvalidSVEVectorListStrided4x64:
7244 case Match_MSR:
7245 case Match_MRS: {
7246 if (ErrorInfo >= Operands.size())
7247 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7248 // Any time we get here, there's nothing fancy to do. Just get the
7249 // operand SMLoc and display the diagnostic.
7250 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7251 if (ErrorLoc == SMLoc())
7252 ErrorLoc = IDLoc;
7253 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7254 }
7255 }
7256
7257 llvm_unreachable("Implement any new match types added!");
7258}
7259
/// ParseDirective parses the AArch64-specific assembler directives.
/// Returns false when the directive was recognized and handled here (even if
/// handling produced a diagnostic); returns true to let the generic parser
/// try it.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Several directive families are only meaningful for one object format.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;
  bool IsELF = Format == MCContext::IsELF;

  // Directive matching is case-insensitive.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Format-independent directives.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_negate_ra_state_with_pc")
    parseDirectiveCFINegateRAStateWithPC();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Mach-O: Linker Optimization Hints.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF: ARM64 Windows structured exception handling (SEH) unwind info.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The four save_any_reg variants share one parser, selected by the
    // (Paired, Writeback) flag pair.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else if (IDVal == ".seh_allocz")
      parseDirectiveSEHAllocZ(Loc);
    else if (IDVal == ".seh_save_zreg")
      parseDirectiveSEHSaveZReg(Loc);
    else if (IDVal == ".seh_save_preg")
      parseDirectiveSEHSavePReg(Loc);
    else
      return true;
  } else if (IsELF) {
    // ELF: AArch64 build-attribute directives.
    if (IDVal == ".aeabi_subsection")
      parseDirectiveAeabiSubSectionHeader(Loc);
    else if (IDVal == ".aeabi_attribute")
      parseDirectiveAeabiAArch64Attr(Loc);
    else
      return true;
  } else
    return true;
  return false;
}
7378
7379static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7380 SmallVector<StringRef, 4> &RequestedExtensions) {
7381 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7382 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7383
7384 if (!NoCrypto && Crypto) {
7385 // Map 'generic' (and others) to sha2 and aes, because
7386 // that was the traditional meaning of crypto.
7387 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7388 ArchInfo == AArch64::ARMV8_3A) {
7389 RequestedExtensions.push_back("sha2");
7390 RequestedExtensions.push_back("aes");
7391 }
7392 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7393 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7394 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7395 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7396 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7397 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7398 RequestedExtensions.push_back("sm4");
7399 RequestedExtensions.push_back("sha3");
7400 RequestedExtensions.push_back("sha2");
7401 RequestedExtensions.push_back("aes");
7402 }
7403 } else if (NoCrypto) {
7404 // Map 'generic' (and others) to sha2 and aes, because
7405 // that was the traditional meaning of crypto.
7406 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7407 ArchInfo == AArch64::ARMV8_3A) {
7408 RequestedExtensions.push_back("nosha2");
7409 RequestedExtensions.push_back("noaes");
7410 }
7411 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7412 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7413 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7414 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7415 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7416 ArchInfo == AArch64::ARMV9_4A) {
7417 RequestedExtensions.push_back("nosm4");
7418 RequestedExtensions.push_back("nosha3");
7419 RequestedExtensions.push_back("nosha2");
7420 RequestedExtensions.push_back("noaes");
7421 }
7422 }
7423}
7424
7426 return SMLoc::getFromPointer(L.getPointer() + Offset);
7427}
7428
/// parseDirectiveArch
///   ::= .arch token[+extension[+extension...]]
/// Resets the subtarget to the named architecture's defaults, then applies
/// each "+ext" / "+noext" suffix in order. Reports an error (returning true)
/// on an unknown arch name or unsupported extension.
bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // Split "armv9.2-a+sve+nosme" into the arch name and its extension tail.
  StringRef Name = getParser().parseStringToEndOfStatement().trim();
  StringRef Arch, ExtensionString;
  std::tie(Arch, ExtensionString) = Name.split('+');

  const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
  if (!ArchInfo)
    return Error(CurLoc, "unknown arch name");

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  // Get the architecture and extension features.
  std::vector<StringRef> AArch64Features;
  AArch64Features.push_back(ArchInfo->ArchFeature);
  AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);

  // Replace the current feature set with the generic CPU plus the arch's
  // default features; extensions are then toggled on top of that.
  MCSubtargetInfo &STI = copySTI();
  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
  STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
                         join(ArchFeatures.begin(), ArchFeatures.end(), ","));

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  // Expand the legacy "crypto"/"nocrypto" pseudo-extension.
  ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
  // CurLoc tracks the position of each extension name so diagnostics point
  // at the offending token rather than the start of the directive.
  CurLoc = incrementLoc(CurLoc, Arch.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    // A leading "no" (any case) disables rather than enables the extension.
    bool EnableFeature = !Name.consume_front_insensitive("no");

    auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
      return Extension.Name == Name;
    });

    if (It == std::end(ExtensionMap))
      return Error(CurLoc, "unsupported architectural extension: " + Name);

    // Transitively set/clear so implied features follow along.
    if (EnableFeature)
      STI.SetFeatureBitsTransitively(It->Features);
    else
      STI.ClearFeatureBitsTransitively(It->Features);
    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
  setAvailableFeatures(Features);

  getTargetStreamer().emitDirectiveArch(Name);
  return false;
}
7487
7488/// parseDirectiveArchExtension
7489/// ::= .arch_extension [no]feature
7490bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7491 SMLoc ExtLoc = getLoc();
7492
7493 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7494
7495 if (parseEOL())
7496 return true;
7497
7498 bool EnableFeature = true;
7499 StringRef Name = FullName;
7500 if (Name.starts_with_insensitive("no")) {
7501 EnableFeature = false;
7502 Name = Name.substr(2);
7503 }
7504
7505 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7506 return Extension.Name == Name;
7507 });
7508
7509 if (It == std::end(ExtensionMap))
7510 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7511
7512 MCSubtargetInfo &STI = copySTI();
7513 if (EnableFeature)
7514 STI.SetFeatureBitsTransitively(It->Features);
7515 else
7516 STI.ClearFeatureBitsTransitively(It->Features);
7517 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7518 setAvailableFeatures(Features);
7519
7520 getTargetStreamer().emitDirectiveArchExtension(FullName);
7521 return false;
7522}
7523
/// parseDirectiveCPU
///   ::= .cpu id[+extension[+extension...]]
/// Resets the subtarget to the named CPU's default features, then applies
/// each "+ext" / "+noext" suffix in order.
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // Split "cortex-a710+sve" into the CPU name and its extension tail.
  StringRef CPU, ExtensionString;
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // Diagnose but deliberately return false: the directive itself was
    // consumed, so the generic parser must not retry it.
    Error(CurLoc, "unknown CPU name");
    return false;
  }
  // Expand the legacy "crypto"/"nocrypto" pseudo-extension for this arch.
  ExpandCryptoAEK(*CpuArch, RequestedExtensions);

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
  // CurLoc tracks each extension's position so diagnostics point at the
  // offending token.
  CurLoc = incrementLoc(CurLoc, CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    // A leading "no" (any case) disables rather than enables the extension.
    bool EnableFeature = !Name.consume_front_insensitive("no");

    auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
      return Extension.Name == Name;
    });

    if (It == std::end(ExtensionMap))
      return Error(CurLoc, "unsupported architectural extension: " + Name);

    // Transitively set/clear so implied features follow along.
    if (EnableFeature)
      STI.SetFeatureBitsTransitively(It->Features);
    else
      STI.ClearFeatureBitsTransitively(It->Features);
    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
  setAvailableFeatures(Features);
  return false;
}
7574
7575/// parseDirectiveInst
7576/// ::= .inst opcode [, ...]
7577bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7578 if (getLexer().is(AsmToken::EndOfStatement))
7579 return Error(Loc, "expected expression following '.inst' directive");
7580
7581 auto parseOp = [&]() -> bool {
7582 SMLoc L = getLoc();
7583 const MCExpr *Expr = nullptr;
7584 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7585 return true;
7586 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7587 if (check(!Value, L, "expected constant expression"))
7588 return true;
7589 getTargetStreamer().emitInst(Value->getValue());
7590 return false;
7591 };
7592
7593 return parseMany(parseOp);
7594}
7595
7596// parseDirectiveTLSDescCall:
7597// ::= .tlsdesccall symbol
7598bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7599 StringRef Name;
7600 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7601 parseToken(AsmToken::EndOfStatement))
7602 return true;
7603
7604 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7605 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7607
7608 MCInst Inst;
7609 Inst.setOpcode(AArch64::TLSDESCCALL);
7611
7612 getParser().getStreamer().emitInstruction(Inst, getSTI());
7613 return false;
7614}
7615
7616/// ::= .loh <lohName | lohId> label1, ..., labelN
7617/// The number of arguments depends on the loh identifier.
7618bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7620 if (getTok().isNot(AsmToken::Identifier)) {
7621 if (getTok().isNot(AsmToken::Integer))
7622 return TokError("expected an identifier or a number in directive");
7623 // We successfully get a numeric value for the identifier.
7624 // Check if it is valid.
7625 int64_t Id = getTok().getIntVal();
7626 if (Id <= -1U && !isValidMCLOHType(Id))
7627 return TokError("invalid numeric identifier in directive");
7628 Kind = (MCLOHType)Id;
7629 } else {
7630 StringRef Name = getTok().getIdentifier();
7631 // We successfully parse an identifier.
7632 // Check if it is a recognized one.
7633 int Id = MCLOHNameToId(Name);
7634
7635 if (Id == -1)
7636 return TokError("invalid identifier in directive");
7637 Kind = (MCLOHType)Id;
7638 }
7639 // Consume the identifier.
7640 Lex();
7641 // Get the number of arguments of this LOH.
7642 int NbArgs = MCLOHIdToNbArgs(Kind);
7643
7644 assert(NbArgs != -1 && "Invalid number of arguments");
7645
7647 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7648 StringRef Name;
7649 if (getParser().parseIdentifier(Name))
7650 return TokError("expected identifier in directive");
7651 Args.push_back(getContext().getOrCreateSymbol(Name));
7652
7653 if (Idx + 1 == NbArgs)
7654 break;
7655 if (parseComma())
7656 return true;
7657 }
7658 if (parseEOL())
7659 return true;
7660
7661 getStreamer().emitLOHDirective(Kind, Args);
7662 return false;
7663}
7664
7665/// parseDirectiveLtorg
7666/// ::= .ltorg | .pool
7667bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7668 if (parseEOL())
7669 return true;
7670 getTargetStreamer().emitCurrentConstantPool();
7671 return false;
7672}
7673
7674/// parseDirectiveReq
7675/// ::= name .req registername
7676bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7677 Lex(); // Eat the '.req' token.
7678 SMLoc SRegLoc = getLoc();
7679 RegKind RegisterKind = RegKind::Scalar;
7680 MCRegister RegNum;
7681 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7682
7683 if (!ParseRes.isSuccess()) {
7684 StringRef Kind;
7685 RegisterKind = RegKind::NeonVector;
7686 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7687
7688 if (ParseRes.isFailure())
7689 return true;
7690
7691 if (ParseRes.isSuccess() && !Kind.empty())
7692 return Error(SRegLoc, "vector register without type specifier expected");
7693 }
7694
7695 if (!ParseRes.isSuccess()) {
7696 StringRef Kind;
7697 RegisterKind = RegKind::SVEDataVector;
7698 ParseRes =
7699 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7700
7701 if (ParseRes.isFailure())
7702 return true;
7703
7704 if (ParseRes.isSuccess() && !Kind.empty())
7705 return Error(SRegLoc,
7706 "sve vector register without type specifier expected");
7707 }
7708
7709 if (!ParseRes.isSuccess()) {
7710 StringRef Kind;
7711 RegisterKind = RegKind::SVEPredicateVector;
7712 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7713
7714 if (ParseRes.isFailure())
7715 return true;
7716
7717 if (ParseRes.isSuccess() && !Kind.empty())
7718 return Error(SRegLoc,
7719 "sve predicate register without type specifier expected");
7720 }
7721
7722 if (!ParseRes.isSuccess())
7723 return Error(SRegLoc, "register name or alias expected");
7724
7725 // Shouldn't be anything else.
7726 if (parseEOL())
7727 return true;
7728
7729 auto pair = std::make_pair(RegisterKind, RegNum);
7730 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7731 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7732
7733 return false;
7734}
7735
7736/// parseDirectiveUneq
7737/// ::= .unreq registername
7738bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7739 if (getTok().isNot(AsmToken::Identifier))
7740 return TokError("unexpected input in .unreq directive.");
7741 RegisterReqs.erase(getTok().getIdentifier().lower());
7742 Lex(); // Eat the identifier.
7743 return parseToken(AsmToken::EndOfStatement);
7744}
7745
7746bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7747 if (parseEOL())
7748 return true;
7749 getStreamer().emitCFINegateRAState();
7750 return false;
7751}
7752
7753bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7754 if (parseEOL())
7755 return true;
7756 getStreamer().emitCFINegateRAStateWithPC();
7757 return false;
7758}
7759
7760/// parseDirectiveCFIBKeyFrame
7761/// ::= .cfi_b_key
7762bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7763 if (parseEOL())
7764 return true;
7765 getStreamer().emitCFIBKeyFrame();
7766 return false;
7767}
7768
7769/// parseDirectiveCFIMTETaggedFrame
7770/// ::= .cfi_mte_tagged_frame
7771bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7772 if (parseEOL())
7773 return true;
7774 getStreamer().emitCFIMTETaggedFrame();
7775 return false;
7776}
7777
7778/// parseDirectiveVariantPCS
7779/// ::= .variant_pcs symbolname
7780bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7781 StringRef Name;
7782 if (getParser().parseIdentifier(Name))
7783 return TokError("expected symbol name");
7784 if (parseEOL())
7785 return true;
7786 getTargetStreamer().emitDirectiveVariantPCS(
7787 getContext().getOrCreateSymbol(Name));
7788 return false;
7789}
7790
7791/// parseDirectiveSEHAllocStack
7792/// ::= .seh_stackalloc
7793bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7794 int64_t Size;
7795 if (parseImmExpr(Size))
7796 return true;
7797 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7798 return false;
7799}
7800
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
/// Marks the end of the prologue in ARM64 Windows unwind info; takes no
/// operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7807
7808/// parseDirectiveSEHSaveR19R20X
7809/// ::= .seh_save_r19r20_x
7810bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7811 int64_t Offset;
7812 if (parseImmExpr(Offset))
7813 return true;
7814 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7815 return false;
7816}
7817
7818/// parseDirectiveSEHSaveFPLR
7819/// ::= .seh_save_fplr
7820bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7821 int64_t Offset;
7822 if (parseImmExpr(Offset))
7823 return true;
7824 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7825 return false;
7826}
7827
7828/// parseDirectiveSEHSaveFPLRX
7829/// ::= .seh_save_fplr_x
7830bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7831 int64_t Offset;
7832 if (parseImmExpr(Offset))
7833 return true;
7834 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7835 return false;
7836}
7837
7838/// parseDirectiveSEHSaveReg
7839/// ::= .seh_save_reg
7840bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7841 unsigned Reg;
7842 int64_t Offset;
7843 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7844 parseComma() || parseImmExpr(Offset))
7845 return true;
7846 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7847 return false;
7848}
7849
7850/// parseDirectiveSEHSaveRegX
7851/// ::= .seh_save_reg_x
7852bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7853 unsigned Reg;
7854 int64_t Offset;
7855 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7856 parseComma() || parseImmExpr(Offset))
7857 return true;
7858 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7859 return false;
7860}
7861
7862/// parseDirectiveSEHSaveRegP
7863/// ::= .seh_save_regp
7864bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7865 unsigned Reg;
7866 int64_t Offset;
7867 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7868 parseComma() || parseImmExpr(Offset))
7869 return true;
7870 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7871 return false;
7872}
7873
7874/// parseDirectiveSEHSaveRegPX
7875/// ::= .seh_save_regp_x
7876bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7877 unsigned Reg;
7878 int64_t Offset;
7879 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7880 parseComma() || parseImmExpr(Offset))
7881 return true;
7882 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7883 return false;
7884}
7885
7886/// parseDirectiveSEHSaveLRPair
7887/// ::= .seh_save_lrpair
7888bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7889 unsigned Reg;
7890 int64_t Offset;
7891 L = getLoc();
7892 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7893 parseComma() || parseImmExpr(Offset))
7894 return true;
7895 if (check(((Reg - 19) % 2 != 0), L,
7896 "expected register with even offset from x19"))
7897 return true;
7898 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7899 return false;
7900}
7901
7902/// parseDirectiveSEHSaveFReg
7903/// ::= .seh_save_freg
7904bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7905 unsigned Reg;
7906 int64_t Offset;
7907 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7908 parseComma() || parseImmExpr(Offset))
7909 return true;
7910 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7911 return false;
7912}
7913
7914/// parseDirectiveSEHSaveFRegX
7915/// ::= .seh_save_freg_x
7916bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7917 unsigned Reg;
7918 int64_t Offset;
7919 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7920 parseComma() || parseImmExpr(Offset))
7921 return true;
7922 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7923 return false;
7924}
7925
7926/// parseDirectiveSEHSaveFRegP
7927/// ::= .seh_save_fregp
7928bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7929 unsigned Reg;
7930 int64_t Offset;
7931 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7932 parseComma() || parseImmExpr(Offset))
7933 return true;
7934 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7935 return false;
7936}
7937
7938/// parseDirectiveSEHSaveFRegPX
7939/// ::= .seh_save_fregp_x
7940bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7941 unsigned Reg;
7942 int64_t Offset;
7943 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7944 parseComma() || parseImmExpr(Offset))
7945 return true;
7946 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7947 return false;
7948}
7949
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
/// Records establishing the frame pointer in the Windows unwind info; takes
/// no operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7956
7957/// parseDirectiveSEHAddFP
7958/// ::= .seh_add_fp
7959bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7960 int64_t Size;
7961 if (parseImmExpr(Size))
7962 return true;
7963 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7964 return false;
7965}
7966
/// parseDirectiveSEHNop
/// ::= .seh_nop
/// Records a prologue no-op in the Windows unwind info; takes no operands
/// and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7973
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
/// Records that the next register pair continues the previous save; takes
/// no operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7980
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
/// Marks the start of an epilogue in the Windows unwind info; takes no
/// operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7987
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
/// Marks the end of an epilogue in the Windows unwind info; takes no
/// operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
7994
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
/// Marks the current function as a trap frame; takes no operands and cannot
/// fail.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
8001
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
/// Records a machine-frame push in the Windows unwind info; takes no
/// operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
8008
/// parseDirectiveSEHContext
/// ::= .seh_context
/// Records a full context save in the Windows unwind info; takes no
/// operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
8015
/// parseDirectiveSEHECContext
/// ::= .seh_ec_context
/// Records an EC (emulation-compatible) context save in the Windows unwind
/// info; takes no operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
8022
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
/// Clears the unwound-to-call flag in the Windows unwind info; takes no
/// operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
8029
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
/// Records pointer-authentication signing of lr in the Windows unwind info;
/// takes no operands and cannot fail.
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
8036
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
/// Records a save of an arbitrary x, d, or q register. \p Paired selects
/// the pair forms (_p/_px), \p Writeback the pre-indexed forms (_x/_px).
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Integer registers: paired/writeback forms require 16-byte alignment,
    // the plain form 8-byte.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    // fp and lr encode as their architectural numbers 29 and 30.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // The pair is (reg, reg+1), so the last register cannot start a pair.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit FP registers: same alignment rules as the integer case.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers always require 16-byte alignment.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
8114
8115/// parseDirectiveAllocZ
8116/// ::= .seh_allocz
8117bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8118 int64_t Offset;
8119 if (parseImmExpr(Offset))
8120 return true;
8121 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8122 return false;
8123}
8124
8125/// parseDirectiveSEHSaveZReg
8126/// ::= .seh_save_zreg
8127bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8128 MCRegister RegNum;
8129 StringRef Kind;
8130 int64_t Offset;
8131 ParseStatus Res =
8132 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8133 if (!Res.isSuccess())
8134 return true;
8135 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
8136 "expected register in range z8 to z23"))
8137 return true;
8138 if (parseComma() || parseImmExpr(Offset))
8139 return true;
8140 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
8141 return false;
8142}
8143
8144/// parseDirectiveSEHSavePReg
8145/// ::= .seh_save_preg
8146bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8147 MCRegister RegNum;
8148 StringRef Kind;
8149 int64_t Offset;
8150 ParseStatus Res =
8151 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
8152 if (!Res.isSuccess())
8153 return true;
8154 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
8155 "expected register in range p4 to p15"))
8156 return true;
8157 if (parseComma() || parseImmExpr(Offset))
8158 return true;
8159 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
8160 return false;
8161}
8162
8163bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8164 // Handle parsing of .aeabi_subsection directives
8165 // - On first declaration of a subsection, expect exactly three identifiers
8166 // after `.aeabi_subsection`: the subsection name and two parameters.
8167 // - When switching to an existing subsection, it is valid to provide only
8168 // the subsection name, or the name together with the two parameters.
8169 MCAsmParser &Parser = getParser();
8170
8171 // Consume the name (subsection name)
8172 StringRef SubsectionName;
8173 AArch64BuildAttributes::VendorID SubsectionNameID;
8174 if (Parser.getTok().is(AsmToken::Identifier)) {
8175 SubsectionName = Parser.getTok().getIdentifier();
8176 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
8177 } else {
8178 Error(Parser.getTok().getLoc(), "subsection name not found");
8179 return true;
8180 }
8181 Parser.Lex();
8182
8183 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8184 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
8185 // Check whether only the subsection name was provided.
8186 // If so, the user is trying to switch to a subsection that should have been
8187 // declared before.
8189 if (SubsectionExists) {
8190 getTargetStreamer().emitAttributesSubsection(
8191 SubsectionName,
8193 SubsectionExists->IsOptional),
8195 SubsectionExists->ParameterType));
8196 return false;
8197 }
8198 // If subsection does not exists, report error.
8199 else {
8200 Error(Parser.getTok().getLoc(),
8201 "Could not switch to subsection '" + SubsectionName +
8202 "' using subsection name, subsection has not been defined");
8203 return true;
8204 }
8205 }
8206
8207 // Otherwise, expecting 2 more parameters: consume a comma
8208 // parseComma() return *false* on success, and call Lex(), no need to call
8209 // Lex() again.
8210 if (Parser.parseComma()) {
8211 return true;
8212 }
8213
8214 // Consume the first parameter (optionality parameter)
8216 // options: optional/required
8217 if (Parser.getTok().is(AsmToken::Identifier)) {
8218 StringRef Optionality = Parser.getTok().getIdentifier();
8219 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
8221 Error(Parser.getTok().getLoc(),
8223 return true;
8224 }
8225 if (SubsectionExists) {
8226 if (IsOptional != SubsectionExists->IsOptional) {
8227 Error(Parser.getTok().getLoc(),
8228 "optionality mismatch! subsection '" + SubsectionName +
8229 "' already exists with optionality defined as '" +
8231 SubsectionExists->IsOptional) +
8232 "' and not '" +
8233 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
8234 return true;
8235 }
8236 }
8237 } else {
8238 Error(Parser.getTok().getLoc(),
8239 "optionality parameter not found, expected required|optional");
8240 return true;
8241 }
8242 // Check for possible IsOptional unaccepted values for known subsections
8243 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
8244 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
8245 Error(Parser.getTok().getLoc(),
8246 "aeabi_feature_and_bits must be marked as optional");
8247 return true;
8248 }
8249 }
8250 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8251 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8252 Error(Parser.getTok().getLoc(),
8253 "aeabi_pauthabi must be marked as required");
8254 return true;
8255 }
8256 }
8257 Parser.Lex();
8258 // consume a comma
8259 if (Parser.parseComma()) {
8260 return true;
8261 }
8262
8263 // Consume the second parameter (type parameter)
8265 if (Parser.getTok().is(AsmToken::Identifier)) {
8266 StringRef Name = Parser.getTok().getIdentifier();
8269 Error(Parser.getTok().getLoc(),
8271 return true;
8272 }
8273 if (SubsectionExists) {
8274 if (Type != SubsectionExists->ParameterType) {
8275 Error(Parser.getTok().getLoc(),
8276 "type mismatch! subsection '" + SubsectionName +
8277 "' already exists with type defined as '" +
8279 SubsectionExists->ParameterType) +
8280 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8281 "'");
8282 return true;
8283 }
8284 }
8285 } else {
8286 Error(Parser.getTok().getLoc(),
8287 "type parameter not found, expected uleb128|ntbs");
8288 return true;
8289 }
8290 // Check for possible unaccepted 'type' values for known subsections
8291 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8292 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8294 Error(Parser.getTok().getLoc(),
8295 SubsectionName + " must be marked as ULEB128");
8296 return true;
8297 }
8298 }
8299 Parser.Lex();
8300
8301 // Parsing finished, check for trailing tokens.
8303 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
8304 "attributes subsection header directive");
8305 return true;
8306 }
8307
8308 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
8309
8310 return false;
8311}
8312
8313bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8314 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8315 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8316 // separated by a comma.
8317 MCAsmParser &Parser = getParser();
8318
8319 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8320 getTargetStreamer().getActiveAttributesSubsection();
8321 if (nullptr == ActiveSubsection) {
8322 Error(Parser.getTok().getLoc(),
8323 "no active subsection, build attribute can not be added");
8324 return true;
8325 }
8326 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8327 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8328
8329 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8331 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8332 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8335 ActiveSubsectionName)
8337
8338 StringRef TagStr = "";
8339 unsigned Tag;
8340 if (Parser.getTok().is(AsmToken::Integer)) {
8341 Tag = getTok().getIntVal();
8342 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8343 TagStr = Parser.getTok().getIdentifier();
8344 switch (ActiveSubsectionID) {
8346 // Tag was provided as an unrecognized string instead of an unsigned
8347 // integer
8348 Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
8349 "' \nExcept for public subsections, "
8350 "tags have to be an unsigned int.");
8351 return true;
8352 break;
8356 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8357 TagStr + "' for subsection '" +
8358 ActiveSubsectionName + "'");
8359 return true;
8360 }
8361 break;
8365 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8366 TagStr + "' for subsection '" +
8367 ActiveSubsectionName + "'");
8368 return true;
8369 }
8370 break;
8371 }
8372 } else {
8373 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8374 return true;
8375 }
8376 Parser.Lex();
8377 // consume a comma
8378 // parseComma() return *false* on success, and call Lex(), no need to call
8379 // Lex() again.
8380 if (Parser.parseComma()) {
8381 return true;
8382 }
8383
8384 // Consume the second parameter (attribute value)
8385 unsigned ValueInt = unsigned(-1);
8386 std::string ValueStr = "";
8387 if (Parser.getTok().is(AsmToken::Integer)) {
8388 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8389 Error(
8390 Parser.getTok().getLoc(),
8391 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8392 return true;
8393 }
8394 ValueInt = getTok().getIntVal();
8395 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8396 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8397 Error(
8398 Parser.getTok().getLoc(),
8399 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8400 return true;
8401 }
8402 ValueStr = Parser.getTok().getIdentifier();
8403 } else if (Parser.getTok().is(AsmToken::String)) {
8404 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8405 Error(
8406 Parser.getTok().getLoc(),
8407 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8408 return true;
8409 }
8410 ValueStr = Parser.getTok().getString();
8411 } else {
8412 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8413 return true;
8414 }
8415 // Check for possible unaccepted values for known tags
8416 // (AEABI_FEATURE_AND_BITS)
8417 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8418 if (0 != ValueInt && 1 != ValueInt) {
8419 Error(Parser.getTok().getLoc(),
8420 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8421 "' options are 0|1");
8422 return true;
8423 }
8424 }
8425 Parser.Lex();
8426
8427 // Parsing finished. Check for trailing tokens.
8429 Error(Parser.getTok().getLoc(),
8430 "unexpected token for AArch64 build attributes tag and value "
8431 "attribute directive");
8432 return true;
8433 }
8434
8435 if (unsigned(-1) != ValueInt) {
8436 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
8437 }
8438 if ("" != ValueStr) {
8439 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8440 ValueStr);
8441 }
8442 return false;
8443}
8444
8445bool AArch64AsmParser::parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E) {
8446 SMLoc Loc = getLoc();
8447 if (getLexer().getKind() != AsmToken::Identifier)
8448 return TokError("expected '%' relocation specifier");
8449 StringRef Identifier = getParser().getTok().getIdentifier();
8450 auto Spec = AArch64::parsePercentSpecifierName(Identifier);
8451 if (!Spec)
8452 return TokError("invalid relocation specifier");
8453
8454 getParser().Lex(); // Eat the identifier
8455 if (parseToken(AsmToken::LParen, "expected '('"))
8456 return true;
8457
8458 const MCExpr *SubExpr;
8459 if (getParser().parseParenExpression(SubExpr, E))
8460 return true;
8461
8462 Res = MCSpecifierExpr::create(SubExpr, Spec, getContext(), Loc);
8463 return false;
8464}
8465
// Parse an expression used in a data directive, handling the optional
// "%specifier(...)" prefix form and trailing "@specifier" forms
// ("@AUTH(...)" everywhere, "@got" on Mach-O only). Returns true on error.
bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
  SMLoc EndLoc;
  // "%spec(expr)" form: hand off to the specifier parser.
  if (parseOptionalToken(AsmToken::Percent))
    return parseExprWithSpecifier(Res, EndLoc);

  if (getParser().parseExpression(Res))
    return true;
  MCAsmParser &Parser = getParser();
  // No '@' suffix: the plain expression is the result.
  if (!parseOptionalToken(AsmToken::At))
    return false;
  if (getLexer().getKind() != AsmToken::Identifier)
    return Error(getLoc(), "expected relocation specifier");

  // Specifier names are matched case-insensitively.
  std::string Identifier = Parser.getTok().getIdentifier().lower();
  SMLoc Loc = getLoc();
  Lex();
  if (Identifier == "auth")
    return parseAuthExpr(Res, EndLoc);

  auto Spec = AArch64::S_None;
  if (STI->getTargetTriple().isOSBinFormatMachO()) {
    if (Identifier == "got")
      Spec = AArch64::S_MACHO_GOT;
  }
  if (Spec == AArch64::S_None)
    return Error(Loc, "invalid relocation specifier");
  // The specifier is folded into the symbol reference itself, so it is only
  // legal directly after a symbol.
  if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
    Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
                                  SRE->getLoc());
  else
    return Error(Loc, "@ specifier only allowed after a symbol");

  // Allow "sym@got + 4 - 2" style addend chains after the specifier.
  for (;;) {
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      break;
    const MCExpr *Term;
    if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
      return true;
    Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
  }
  return false;
}
8513
8514/// parseAuthExpr
8515/// ::= _sym@AUTH(ib,123[,addr])
8516/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8517/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8518bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8519 MCAsmParser &Parser = getParser();
8520 MCContext &Ctx = getContext();
8521 AsmToken Tok = Parser.getTok();
8522
8523 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8524 if (parseToken(AsmToken::LParen, "expected '('"))
8525 return true;
8526
8527 if (Parser.getTok().isNot(AsmToken::Identifier))
8528 return TokError("expected key name");
8529
8530 StringRef KeyStr = Parser.getTok().getIdentifier();
8531 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8532 if (!KeyIDOrNone)
8533 return TokError("invalid key '" + KeyStr + "'");
8534 Parser.Lex();
8535
8536 if (parseToken(AsmToken::Comma, "expected ','"))
8537 return true;
8538
8539 if (Parser.getTok().isNot(AsmToken::Integer))
8540 return TokError("expected integer discriminator");
8541 int64_t Discriminator = Parser.getTok().getIntVal();
8542
8543 if (!isUInt<16>(Discriminator))
8544 return TokError("integer discriminator " + Twine(Discriminator) +
8545 " out of range [0, 0xFFFF]");
8546 Parser.Lex();
8547
8548 bool UseAddressDiversity = false;
8549 if (Parser.getTok().is(AsmToken::Comma)) {
8550 Parser.Lex();
8551 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8552 Parser.getTok().getIdentifier() != "addr")
8553 return TokError("expected 'addr'");
8554 UseAddressDiversity = true;
8555 Parser.Lex();
8556 }
8557
8558 EndLoc = Parser.getTok().getEndLoc();
8559 if (parseToken(AsmToken::RParen, "expected ')'"))
8560 return true;
8561
8562 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8563 UseAddressDiversity, Ctx, Res->getLoc());
8564 return false;
8565}
8566
// Classify Expr as "symbol (+ constant addend)" and split it into its ELF
// specifier (from a wrapping MCSpecifierExpr), its Darwin specifier (from
// the symbol-ref kind), and the addend. Returns true if Expr has that shape
// and does not mix ELF and Darwin specifier syntax.
bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                         AArch64::Specifier &ELFSpec,
                                         AArch64::Specifier &DarwinSpec,
                                         int64_t &Addend) {
  ELFSpec = AArch64::S_INVALID;
  DarwinSpec = AArch64::S_None;
  Addend = 0;

  // Peel off an explicit specifier wrapper (e.g. ":lo12:") if present.
  if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
    ELFSpec = AE->getSpecifier();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinSpec = AArch64::Specifier(SE->getKind());
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
  if (!Relocatable || Res.getSubSym())
    return false;

  // Treat expressions with an ELFSpec (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
    return false;

  if (Res.getAddSym())
    DarwinSpec = AArch64::Specifier(Res.getSpecifier());
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
}
8606
8607/// Force static initialization.
8608extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8616
8617#define GET_REGISTER_MATCHER
8618#define GET_SUBTARGET_FEATURE_NAME
8619#define GET_MATCHER_IMPLEMENTATION
8620#define GET_MNEMONIC_SPELL_CHECKER
8621#include "AArch64GenAsmMatcher.inc"
8622
8623// Define this matcher function after the auto-generated include so we
8624// have the match class enum definitions.
8625unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8626 unsigned Kind) {
8627 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8628
8629 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8630 if (!Op.isImm())
8631 return Match_InvalidOperand;
8632 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8633 if (!CE)
8634 return Match_InvalidOperand;
8635 if (CE->getValue() == ExpectedVal)
8636 return Match_Success;
8637 return Match_InvalidOperand;
8638 };
8639
8640 switch (Kind) {
8641 default:
8642 return Match_InvalidOperand;
8643 case MCK_MPR:
8644 // If the Kind is a token for the MPR register class which has the "za"
8645 // register (SME accumulator array), check if the asm is a literal "za"
8646 // token. This is for the "smstart za" alias that defines the register
8647 // as a literal token.
8648 if (Op.isTokenEqual("za"))
8649 return Match_Success;
8650 return Match_InvalidOperand;
8651
8652 // If the kind is a token for a literal immediate, check if our asm operand
8653 // matches. This is for InstAliases which have a fixed-value immediate in
8654 // the asm string, such as hints which are parsed into a specific
8655 // instruction definition.
8656#define MATCH_HASH(N) \
8657 case MCK__HASH_##N: \
8658 return MatchesOpImmediate(N);
8659 MATCH_HASH(0)
8660 MATCH_HASH(1)
8661 MATCH_HASH(2)
8662 MATCH_HASH(3)
8663 MATCH_HASH(4)
8664 MATCH_HASH(6)
8665 MATCH_HASH(7)
8666 MATCH_HASH(8)
8667 MATCH_HASH(10)
8668 MATCH_HASH(12)
8669 MATCH_HASH(14)
8670 MATCH_HASH(16)
8671 MATCH_HASH(24)
8672 MATCH_HASH(25)
8673 MATCH_HASH(26)
8674 MATCH_HASH(27)
8675 MATCH_HASH(28)
8676 MATCH_HASH(29)
8677 MATCH_HASH(30)
8678 MATCH_HASH(31)
8679 MATCH_HASH(32)
8680 MATCH_HASH(40)
8681 MATCH_HASH(48)
8682 MATCH_HASH(64)
8683#undef MATCH_HASH
8684#define MATCH_HASH_MINUS(N) \
8685 case MCK__HASH__MINUS_##N: \
8686 return MatchesOpImmediate(-N);
8690#undef MATCH_HASH_MINUS
8691 }
8692}
8693
// Parse a consecutive even/odd GPR pair (e.g. "x0, x1" or "w2, w3") and push
// the matching sequential-pair super-register operand. Both registers must
// be the same width, the first must have an even encoding, and the second
// must be the next encoding up.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(FirstReg);
  if (!Res.isSuccess())
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register decides the pair's width (X or W).
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register of the pair must have an even encoding.
  if (FirstEncoding & 0x1)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  if (getTok().isNot(AsmToken::Comma))
    return Error(getLoc(), "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (!Res.isSuccess())
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // The second register must be the next encoding and the same width as the
  // first.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg)))
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // Map the even register to the sequential-pair super-register covering
  // both halves.
  MCRegister Pair;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
                                               getLoc(), getContext()));

  return ParseStatus::Success;
}
8757
8758template <bool ParseShiftExtend, bool ParseSuffix>
8759ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8760 const SMLoc S = getLoc();
8761 // Check for a SVE vector register specifier first.
8762 MCRegister RegNum;
8763 StringRef Kind;
8764
8765 ParseStatus Res =
8766 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8767
8768 if (!Res.isSuccess())
8769 return Res;
8770
8771 if (ParseSuffix && Kind.empty())
8772 return ParseStatus::NoMatch;
8773
8774 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8775 if (!KindRes)
8776 return ParseStatus::NoMatch;
8777
8778 unsigned ElementWidth = KindRes->second;
8779
8780 // No shift/extend is the default.
8781 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8782 Operands.push_back(AArch64Operand::CreateVectorReg(
8783 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8784
8785 ParseStatus Res = tryParseVectorIndex(Operands);
8786 if (Res.isFailure())
8787 return ParseStatus::Failure;
8788 return ParseStatus::Success;
8789 }
8790
8791 // Eat the comma
8792 Lex();
8793
8794 // Match the shift
8796 Res = tryParseOptionalShiftExtend(ExtOpnd);
8797 if (!Res.isSuccess())
8798 return Res;
8799
8800 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8801 Operands.push_back(AArch64Operand::CreateVectorReg(
8802 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8803 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8804 Ext->hasShiftExtendAmount()));
8805
8806 return ParseStatus::Success;
8807}
8808
8809ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8810 MCAsmParser &Parser = getParser();
8811
8812 SMLoc SS = getLoc();
8813 const AsmToken &TokE = getTok();
8814 bool IsHash = TokE.is(AsmToken::Hash);
8815
8816 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8817 return ParseStatus::NoMatch;
8818
8819 int64_t Pattern;
8820 if (IsHash) {
8821 Lex(); // Eat hash
8822
8823 // Parse the immediate operand.
8824 const MCExpr *ImmVal;
8825 SS = getLoc();
8826 if (Parser.parseExpression(ImmVal))
8827 return ParseStatus::Failure;
8828
8829 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8830 if (!MCE)
8831 return TokError("invalid operand for instruction");
8832
8833 Pattern = MCE->getValue();
8834 } else {
8835 // Parse the pattern
8836 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8837 if (!Pat)
8838 return ParseStatus::NoMatch;
8839
8840 Lex();
8841 Pattern = Pat->Encoding;
8842 assert(Pattern >= 0 && Pattern < 32);
8843 }
8844
8845 Operands.push_back(
8846 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8847 SS, getLoc(), getContext()));
8848
8849 return ParseStatus::Success;
8850}
8851
8852ParseStatus
8853AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8854 int64_t Pattern;
8855 SMLoc SS = getLoc();
8856 const AsmToken &TokE = getTok();
8857 // Parse the pattern
8858 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8859 TokE.getString());
8860 if (!Pat)
8861 return ParseStatus::NoMatch;
8862
8863 Lex();
8864 Pattern = Pat->Encoding;
8865 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8866
8867 Operands.push_back(
8868 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8869 SS, getLoc(), getContext()));
8870
8871 return ParseStatus::Success;
8872}
8873
8874ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8875 SMLoc SS = getLoc();
8876
8877 MCRegister XReg;
8878 if (!tryParseScalarRegister(XReg).isSuccess())
8879 return ParseStatus::NoMatch;
8880
8881 MCContext &ctx = getContext();
8882 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8883 MCRegister X8Reg = RI->getMatchingSuperReg(
8884 XReg, AArch64::x8sub_0,
8885 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8886 if (!X8Reg)
8887 return Error(SS,
8888 "expected an even-numbered x-register in the range [x0,x22]");
8889
8890 Operands.push_back(
8891 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8892 return ParseStatus::Success;
8893}
8894
// Parse an immediate range of the form "<first>:<last>" (e.g. "0:3") and
// push it as an immediate-range operand.
ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Integer))
    return ParseStatus::NoMatch;

  // Only commit once we see "<int>:"; otherwise leave the tokens for other
  // operand parsers.
  if (getLexer().peekTok().isNot(AsmToken::Colon))
    return ParseStatus::NoMatch;

  const MCExpr *ImmF;
  if (getParser().parseExpression(ImmF))
    return ParseStatus::NoMatch;

  if (getTok().isNot(AsmToken::Colon))
    return ParseStatus::NoMatch;

  Lex(); // Eat ':'
  if (getTok().isNot(AsmToken::Integer))
    return ParseStatus::NoMatch;

  SMLoc E = getTok().getLoc();
  const MCExpr *ImmL;
  if (getParser().parseExpression(ImmL))
    return ParseStatus::NoMatch;

  // Both bounds started as integer tokens, so the parsed expressions fold to
  // constants and the casts below cannot fail.
  unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
  unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();

  Operands.push_back(
      AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
  return ParseStatus::Success;
}
8927
8928template <int Adj>
8929ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8930 SMLoc S = getLoc();
8931
8932 parseOptionalToken(AsmToken::Hash);
8933 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8934
8935 if (getTok().isNot(AsmToken::Integer))
8936 return ParseStatus::NoMatch;
8937
8938 const MCExpr *Ex;
8939 if (getParser().parseExpression(Ex))
8940 return ParseStatus::NoMatch;
8941
8942 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8943 if (IsNegative)
8944 Imm = -Imm;
8945
8946 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8947 // return a value, which is certain to trigger a error message about invalid
8948 // immediate range instead of a non-descriptive invalid operand error.
8949 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8950 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8951 Imm = -2;
8952 else
8953 Imm += Adj;
8954
8955 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8956 Operands.push_back(AArch64Operand::CreateImm(
8958
8959 return ParseStatus::Success;
8960}
static bool isGPR64(unsigned Reg, unsigned SubReg, const MachineRegisterInfo *MRI)
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (elements, element-width) if Suffix is a valid vector kind.
static unsigned matchMatrixRegName(StringRef Name)
static bool isMovPrfxable(unsigned TSFlags)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
static AArch64CC::CondCode parseCondCode(ArrayRef< MachineOperand > Cond)
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
@ Default
Value * getPointer(Value *Ptr)
static LVOptions Options
Definition LVOptions.cpp:25
Live Register Matrix
loop data Loop Data Prefetch
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:483
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
APInt bitcastToAPInt() const
Definition APFloat.h:1408
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1585
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition AsmLexer.h:121
void UnLex(AsmToken const &Token)
Definition AsmLexer.h:106
LLVM_ABI SMLoc getLoc() const
Definition AsmLexer.cpp:31
int64_t getIntVal() const
Definition MCAsmMacro.h:108
bool isNot(TokenKind K) const
Definition MCAsmMacro.h:76
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition MCAsmMacro.h:103
bool is(TokenKind K) const
Definition MCAsmMacro.h:75
LLVM_ABI SMLoc getEndLoc() const
Definition AsmLexer.cpp:33
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition MCAsmMacro.h:92
Base class for user error types.
Definition Error.h:354
Container class for subtarget features.
constexpr size_t size() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
AsmLexer & getLexer()
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
static LLVM_ABI const MCBinaryExpr * create(Opcode Op, const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:201
@ Sub
Subtraction.
Definition MCExpr.h:324
@ Add
Addition.
Definition MCExpr.h:302
int64_t getValue() const
Definition MCExpr.h:171
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
const MCRegisterInfo * getRegisterInfo() const
Definition MCContext.h:411
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
SMLoc getLoc() const
Definition MCExpr.h:86
unsigned getNumOperands() const
Definition MCInst.h:212
void setLoc(SMLoc loc)
Definition MCInst.h:207
unsigned getOpcode() const
Definition MCInst.h:202
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isImm() const
Definition MCInst.h:66
bool isReg() const
Definition MCInst.h:65
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
const MCExpr * getExpr() const
Definition MCInst.h:118
bool isExpr() const
Definition MCInst.h:69
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:222
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:333
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with any appended feature string.
const FeatureBitset & ClearFeatureBitsTransitively(const FeatureBitset &FB)
const FeatureBitset & SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
VariantKind getKind() const
Definition MCExpr.h:232
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
uint32_t getSpecifier() const
Definition MCValue.h:46
const MCSymbol * getSubSym() const
Definition MCValue.h:51
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
constexpr unsigned id() const
Definition Register.h:100
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
constexpr const char * getPointer() const
Definition SMLoc.h:33
void insert_range(Range &&R)
Definition SmallSet.h:196
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
void erase(iterator I)
Definition StringMap.h:427
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition StringMap.h:321
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition StringRef.h:629
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition StringRef.h:609
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition StringRef.h:844
LLVM_ABI std::string lower() const
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition StringRef.h:169
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:787
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define INT64_MIN
Definition DataTypes.h:74
#define INT64_MAX
Definition DataTypes.h:71
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SubsectionType getTypeID(StringRef Type)
StringRef getVendorName(unsigned const Vendor)
StringRef getOptionalStr(unsigned Optional)
VendorID
AArch64 build attributes vendors IDs (a.k.a subsection name)
SubsectionOptional getOptionalID(StringRef Optional)
FeatureAndBitsTags getFeatureAndBitsTagsID(StringRef FeatureAndBitsTag)
VendorID getVendorID(StringRef const Vendor)
PauthABITags getPauthABITagsID(StringRef PauthABITag)
StringRef getTypeStr(unsigned Type)
static CondCode getInvertedCondCode(CondCode Code)
const PHint * lookupPHintByName(StringRef)
uint32_t parseGenericRegister(StringRef Name)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Specifier parsePercentSpecifierName(StringRef)
LLVM_ABI const ArchInfo * parseArch(StringRef Arch)
LLVM_ABI const ArchInfo * getArchForCpu(StringRef CPU)
LLVM_ABI bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Entry
Definition COFF.h:862
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
constexpr double e
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition Error.h:1129
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
static int MCLOHNameToId(StringRef Name)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
std::string utostr(uint64_t X, bool isNeg=false)
static bool isValidMCLOHType(unsigned Kind)
Op::Description Desc
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
static int MCLOHIdToNbArgs(MCLOHType Kind)
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
static MCRegister getXRegFromWReg(MCRegister Reg)
MCLOHType
Linker Optimization Hint Type.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
#define N
const FeatureBitset Features
const char * Name
AArch64::ExtensionBitset DefaultExts
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name
bool haveFeatures(FeatureBitset ActiveFeatures) const