LLVM  15.0.0git
ARMAsmParser.cpp
Go to the documentation of this file.
1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMBaseInstrInfo.h"
10 #include "ARMFeatures.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APFloat.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSet.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/Triple.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/MC/MCContext.h"
31 #include "llvm/MC/MCExpr.h"
32 #include "llvm/MC/MCInst.h"
33 #include "llvm/MC/MCInstrDesc.h"
34 #include "llvm/MC/MCInstrInfo.h"
41 #include "llvm/MC/MCRegisterInfo.h"
42 #include "llvm/MC/MCSection.h"
43 #include "llvm/MC/MCStreamer.h"
45 #include "llvm/MC/MCSymbol.h"
47 #include "llvm/MC/TargetRegistry.h"
49 #include "llvm/Support/ARMEHABI.h"
50 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/SMLoc.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstddef>
61 #include <cstdint>
62 #include <iterator>
63 #include <limits>
64 #include <memory>
65 #include <string>
66 #include <utility>
67 #include <vector>
68 
// Debug category used by LLVM_DEBUG output from this file.
#define DEBUG_TYPE "asm-parser"

using namespace llvm;

namespace llvm {
// Per-opcode instruction descriptor table; defined elsewhere in the ARM
// backend and only declared here.
extern const MCInstrDesc ARMInsts[];
} // end namespace llvm
76 
77 namespace {
78 
79 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
80 
81 static cl::opt<ImplicitItModeTy> ImplicitItMode(
82  "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
83  cl::desc("Allow conditional instructions outdside of an IT block"),
84  cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
85  "Accept in both ISAs, emit implicit ITs in Thumb"),
86  clEnumValN(ImplicitItModeTy::Never, "never",
87  "Warn in ARM, reject in Thumb"),
88  clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
89  "Accept in ARM, reject in Thumb"),
90  clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
91  "Warn in ARM, emit implicit ITs in Thumb")));
92 
93 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
94  cl::init(false));
95 
96 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
97 
// Fetch the state bit of a single IT-block slot from the parser's
// pre-expanded mask representation. Position==0 means we're not in an IT
// block at all. Position==1 asks for the first state bit, the mandatory
// initial 'Then', which is always 0 and is kept at bit 4 of Mask;
// Position==2 reads bit 3 of Mask, and so on downwards.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned BitIndex = 5 - Position;
  return (Mask & (1u << BitIndex)) ? 1u : 0u;
}
107 
// Tracks where the ARM EHABI unwinding directives (.fnstart, .cantunwind,
// .personality, .personalityindex, .handlerdata) were seen for the function
// currently being parsed, so that a later conflicting directive can emit a
// note pointing back at the earlier one. Also remembers the frame-pointer
// register selected by .setfp.
class UnwindContext {
  using Locs = SmallVector<SMLoc, 4>;

  MCAsmParser &Parser;
  Locs FnStartLocs;          // Every .fnstart seen so far.
  Locs CantUnwindLocs;       // Every .cantunwind seen so far.
  Locs PersonalityLocs;      // Every .personality seen so far.
  Locs PersonalityIndexLocs; // Every .personalityindex seen so far.
  Locs HandlerDataLocs;      // Every .handlerdata seen so far.
  int FPReg;                 // Frame-pointer register; defaults to SP.

public:
  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}

  bool hasFnStart() const { return !FnStartLocs.empty(); }
  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }

  // True if either form of personality directive has been seen.
  bool hasPersonality() const {
    return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
  }

  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }

  void saveFPReg(int Reg) { FPReg = Reg; }
  int getFPReg() const { return FPReg; }

  // Each emit*LocNotes() helper attaches one diagnostic note per recorded
  // directive location.

  void emitFnStartLocNotes() const {
    for (const SMLoc &Loc : FnStartLocs)
      Parser.Note(Loc, ".fnstart was specified here");
  }

  void emitCantUnwindLocNotes() const {
    for (const SMLoc &Loc : CantUnwindLocs)
      Parser.Note(Loc, ".cantunwind was specified here");
  }

  void emitHandlerDataLocNotes() const {
    for (const SMLoc &Loc : HandlerDataLocs)
      Parser.Note(Loc, ".handlerdata was specified here");
  }

  void emitPersonalityLocNotes() const {
    // Two-way merge of the .personality and .personalityindex location
    // lists, ordered by buffer position, so the notes come out in source
    // order. The two directives can never occupy the same location.
    for (Locs::const_iterator PI = PersonalityLocs.begin(),
                              PE = PersonalityLocs.end(),
                              PII = PersonalityIndexLocs.begin(),
                              PIE = PersonalityIndexLocs.end();
         PI != PE || PII != PIE;) {
      if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
        Parser.Note(*PI++, ".personality was specified here");
      else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
        Parser.Note(*PII++, ".personalityindex was specified here");
      else
        llvm_unreachable(".personality and .personalityindex cannot be "
                         "at the same location");
    }
  }

  // Drop all recorded state; used when the current function's unwinding
  // region ends.
  void reset() {
    FnStartLocs = Locs();
    CantUnwindLocs = Locs();
    PersonalityLocs = Locs();
    HandlerDataLocs = Locs();
    PersonalityIndexLocs = Locs();
    FPReg = ARM::SP;
  }
};
179 
180 // Various sets of ARM instruction mnemonics which are used by the asm parser
181 class ARMMnemonicSets {
182  StringSet<> CDE;
183  StringSet<> CDEWithVPTSuffix;
184 public:
185  ARMMnemonicSets(const MCSubtargetInfo &STI);
186 
187  /// Returns true iff a given mnemonic is a CDE instruction
188  bool isCDEInstr(StringRef Mnemonic) {
189  // Quick check before searching the set
190  if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
191  return false;
192  return CDE.count(Mnemonic);
193  }
194 
195  /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
196  /// (possibly with a predication suffix "e" or "t")
197  bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
198  if (!Mnemonic.startswith("vcx"))
199  return false;
200  return CDEWithVPTSuffix.count(Mnemonic);
201  }
202 
203  /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
204  /// (possibly with a condition suffix)
205  bool isITPredicableCDEInstr(StringRef Mnemonic) {
206  if (!Mnemonic.startswith("cx"))
207  return false;
208  return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
209  Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
210  Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
211  }
212 
213  /// Return true iff a given mnemonic is an integer CDE instruction with
214  /// dual-register destination
215  bool isCDEDualRegInstr(StringRef Mnemonic) {
216  if (!Mnemonic.startswith("cx"))
217  return false;
218  return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
219  Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
220  Mnemonic == "cx3d" || Mnemonic == "cx3da";
221  }
222 };
223 
224 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
225  for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
226  "cx2", "cx2a", "cx2d", "cx2da",
227  "cx3", "cx3a", "cx3d", "cx3da", })
228  CDE.insert(Mnemonic);
229  for (StringRef Mnemonic :
230  {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
231  CDE.insert(Mnemonic);
232  CDEWithVPTSuffix.insert(Mnemonic);
233  CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
234  CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
235  }
236 }
237 
238 class ARMAsmParser : public MCTargetAsmParser {
239  const MCRegisterInfo *MRI;
240  UnwindContext UC;
241  ARMMnemonicSets MS;
242 
243  ARMTargetStreamer &getTargetStreamer() {
244  assert(getParser().getStreamer().getTargetStreamer() &&
245  "do not have a target streamer");
246  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
247  return static_cast<ARMTargetStreamer &>(TS);
248  }
249 
250  // Map of register aliases registers via the .req directive.
251  StringMap<unsigned> RegisterReqs;
252 
253  bool NextSymbolIsThumb;
254 
255  bool useImplicitITThumb() const {
256  return ImplicitItMode == ImplicitItModeTy::Always ||
257  ImplicitItMode == ImplicitItModeTy::ThumbOnly;
258  }
259 
260  bool useImplicitITARM() const {
261  return ImplicitItMode == ImplicitItModeTy::Always ||
262  ImplicitItMode == ImplicitItModeTy::ARMOnly;
263  }
264 
265  struct {
266  ARMCC::CondCodes Cond; // Condition for IT block.
267  unsigned Mask:4; // Condition mask for instructions.
268  // Starting at first 1 (from lsb).
269  // '1' condition as indicated in IT.
270  // '0' inverse of condition (else).
271  // Count of instructions in IT block is
272  // 4 - trailingzeroes(mask)
273  // Note that this does not have the same encoding
274  // as in the IT instruction, which also depends
275  // on the low bit of the condition code.
276 
277  unsigned CurPosition; // Current position in parsing of IT
278  // block. In range [0,4], with 0 being the IT
279  // instruction itself. Initialized according to
280  // count of instructions in block. ~0U if no
281  // active IT block.
282 
283  bool IsExplicit; // true - The IT instruction was present in the
284  // input, we should not modify it.
285  // false - The IT instruction was added
286  // implicitly, we can extend it if that
287  // would be legal.
288  } ITState;
289 
290  SmallVector<MCInst, 4> PendingConditionalInsts;
291 
292  void flushPendingInstructions(MCStreamer &Out) override {
293  if (!inImplicitITBlock()) {
294  assert(PendingConditionalInsts.size() == 0);
295  return;
296  }
297 
298  // Emit the IT instruction
299  MCInst ITInst;
300  ITInst.setOpcode(ARM::t2IT);
301  ITInst.addOperand(MCOperand::createImm(ITState.Cond));
302  ITInst.addOperand(MCOperand::createImm(ITState.Mask));
303  Out.emitInstruction(ITInst, getSTI());
304 
305  // Emit the conditonal instructions
306  assert(PendingConditionalInsts.size() <= 4);
307  for (const MCInst &Inst : PendingConditionalInsts) {
308  Out.emitInstruction(Inst, getSTI());
309  }
310  PendingConditionalInsts.clear();
311 
312  // Clear the IT state
313  ITState.Mask = 0;
314  ITState.CurPosition = ~0U;
315  }
316 
317  bool inITBlock() { return ITState.CurPosition != ~0U; }
318  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
319  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
320 
321  bool lastInITBlock() {
322  return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
323  }
324 
325  void forwardITPosition() {
326  if (!inITBlock()) return;
327  // Move to the next instruction in the IT block, if there is one. If not,
328  // mark the block as done, except for implicit IT blocks, which we leave
329  // open until we find an instruction that can't be added to it.
330  unsigned TZ = countTrailingZeros(ITState.Mask);
331  if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
332  ITState.CurPosition = ~0U; // Done with the IT block after this.
333  }
334 
335  // Rewind the state of the current IT block, removing the last slot from it.
336  void rewindImplicitITPosition() {
337  assert(inImplicitITBlock());
338  assert(ITState.CurPosition > 1);
339  ITState.CurPosition--;
340  unsigned TZ = countTrailingZeros(ITState.Mask);
341  unsigned NewMask = 0;
342  NewMask |= ITState.Mask & (0xC << TZ);
343  NewMask |= 0x2 << TZ;
344  ITState.Mask = NewMask;
345  }
346 
347  // Rewind the state of the current IT block, removing the last slot from it.
348  // If we were at the first slot, this closes the IT block.
349  void discardImplicitITBlock() {
350  assert(inImplicitITBlock());
351  assert(ITState.CurPosition == 1);
352  ITState.CurPosition = ~0U;
353  }
354 
355  // Return the low-subreg of a given Q register.
356  unsigned getDRegFromQReg(unsigned QReg) const {
357  return MRI->getSubReg(QReg, ARM::dsub_0);
358  }
359 
360  // Get the condition code corresponding to the current IT block slot.
361  ARMCC::CondCodes currentITCond() {
362  unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
363  return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
364  }
365 
366  // Invert the condition of the current IT block slot without changing any
367  // other slots in the same block.
368  void invertCurrentITCondition() {
369  if (ITState.CurPosition == 1) {
370  ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
371  } else {
372  ITState.Mask ^= 1 << (5 - ITState.CurPosition);
373  }
374  }
375 
376  // Returns true if the current IT block is full (all 4 slots used).
377  bool isITBlockFull() {
378  return inITBlock() && (ITState.Mask & 1);
379  }
380 
381  // Extend the current implicit IT block to have one more slot with the given
382  // condition code.
383  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
384  assert(inImplicitITBlock());
385  assert(!isITBlockFull());
386  assert(Cond == ITState.Cond ||
387  Cond == ARMCC::getOppositeCondition(ITState.Cond));
388  unsigned TZ = countTrailingZeros(ITState.Mask);
389  unsigned NewMask = 0;
390  // Keep any existing condition bits.
391  NewMask |= ITState.Mask & (0xE << TZ);
392  // Insert the new condition bit.
393  NewMask |= (Cond != ITState.Cond) << TZ;
394  // Move the trailing 1 down one bit.
395  NewMask |= 1 << (TZ - 1);
396  ITState.Mask = NewMask;
397  }
398 
399  // Create a new implicit IT block with a dummy condition code.
400  void startImplicitITBlock() {
401  assert(!inITBlock());
402  ITState.Cond = ARMCC::AL;
403  ITState.Mask = 8;
404  ITState.CurPosition = 1;
405  ITState.IsExplicit = false;
406  }
407 
408  // Create a new explicit IT block with the given condition and mask.
409  // The mask should be in the format used in ARMOperand and
410  // MCOperand, with a 1 implying 'e', regardless of the low bit of
411  // the condition.
412  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
413  assert(!inITBlock());
414  ITState.Cond = Cond;
415  ITState.Mask = Mask;
416  ITState.CurPosition = 0;
417  ITState.IsExplicit = true;
418  }
419 
420  struct {
421  unsigned Mask : 4;
422  unsigned CurPosition;
423  } VPTState;
424  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
425  void forwardVPTPosition() {
426  if (!inVPTBlock()) return;
427  unsigned TZ = countTrailingZeros(VPTState.Mask);
428  if (++VPTState.CurPosition == 5 - TZ)
429  VPTState.CurPosition = ~0U;
430  }
431 
432  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
433  return getParser().Note(L, Msg, Range);
434  }
435 
436  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
437  return getParser().Warning(L, Msg, Range);
438  }
439 
440  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
441  return getParser().Error(L, Msg, Range);
442  }
443 
444  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
445  unsigned ListNo, bool IsARPop = false);
446  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
447  unsigned ListNo);
448 
449  int tryParseRegister();
450  bool tryParseRegisterWithWriteBack(OperandVector &);
451  int tryParseShiftRegister(OperandVector &);
452  bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
453  bool AllowRAAC = false);
454  bool parseMemory(OperandVector &);
455  bool parseOperand(OperandVector &, StringRef Mnemonic);
456  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
457  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
458  unsigned &ShiftAmount);
459  bool parseLiteralValues(unsigned Size, SMLoc L);
460  bool parseDirectiveThumb(SMLoc L);
461  bool parseDirectiveARM(SMLoc L);
462  bool parseDirectiveThumbFunc(SMLoc L);
463  bool parseDirectiveCode(SMLoc L);
464  bool parseDirectiveSyntax(SMLoc L);
465  bool parseDirectiveReq(StringRef Name, SMLoc L);
466  bool parseDirectiveUnreq(SMLoc L);
467  bool parseDirectiveArch(SMLoc L);
468  bool parseDirectiveEabiAttr(SMLoc L);
469  bool parseDirectiveCPU(SMLoc L);
470  bool parseDirectiveFPU(SMLoc L);
471  bool parseDirectiveFnStart(SMLoc L);
472  bool parseDirectiveFnEnd(SMLoc L);
473  bool parseDirectiveCantUnwind(SMLoc L);
474  bool parseDirectivePersonality(SMLoc L);
475  bool parseDirectiveHandlerData(SMLoc L);
476  bool parseDirectiveSetFP(SMLoc L);
477  bool parseDirectivePad(SMLoc L);
478  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
479  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
480  bool parseDirectiveLtorg(SMLoc L);
481  bool parseDirectiveEven(SMLoc L);
482  bool parseDirectivePersonalityIndex(SMLoc L);
483  bool parseDirectiveUnwindRaw(SMLoc L);
484  bool parseDirectiveTLSDescSeq(SMLoc L);
485  bool parseDirectiveMovSP(SMLoc L);
486  bool parseDirectiveObjectArch(SMLoc L);
487  bool parseDirectiveArchExtension(SMLoc L);
488  bool parseDirectiveAlign(SMLoc L);
489  bool parseDirectiveThumbSet(SMLoc L);
490 
491  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
492  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
493  unsigned &PredicationCode,
494  unsigned &VPTPredicationCode, bool &CarrySetting,
495  unsigned &ProcessorIMod, StringRef &ITMask);
496  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
497  StringRef FullInst, bool &CanAcceptCarrySet,
498  bool &CanAcceptPredicationCode,
499  bool &CanAcceptVPTPredicationCode);
500  bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
501 
502  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
504  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
505 
506  bool isThumb() const {
507  // FIXME: Can tablegen auto-generate this?
508  return getSTI().getFeatureBits()[ARM::ModeThumb];
509  }
510 
511  bool isThumbOne() const {
512  return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
513  }
514 
515  bool isThumbTwo() const {
516  return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
517  }
518 
519  bool hasThumb() const {
520  return getSTI().getFeatureBits()[ARM::HasV4TOps];
521  }
522 
523  bool hasThumb2() const {
524  return getSTI().getFeatureBits()[ARM::FeatureThumb2];
525  }
526 
527  bool hasV6Ops() const {
528  return getSTI().getFeatureBits()[ARM::HasV6Ops];
529  }
530 
531  bool hasV6T2Ops() const {
532  return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
533  }
534 
535  bool hasV6MOps() const {
536  return getSTI().getFeatureBits()[ARM::HasV6MOps];
537  }
538 
539  bool hasV7Ops() const {
540  return getSTI().getFeatureBits()[ARM::HasV7Ops];
541  }
542 
543  bool hasV8Ops() const {
544  return getSTI().getFeatureBits()[ARM::HasV8Ops];
545  }
546 
547  bool hasV8MBaseline() const {
548  return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
549  }
550 
551  bool hasV8MMainline() const {
552  return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
553  }
554  bool hasV8_1MMainline() const {
555  return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
556  }
557  bool hasMVE() const {
558  return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
559  }
560  bool hasMVEFloat() const {
561  return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
562  }
563  bool hasCDE() const {
564  return getSTI().getFeatureBits()[ARM::HasCDEOps];
565  }
566  bool has8MSecExt() const {
567  return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
568  }
569 
570  bool hasARM() const {
571  return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
572  }
573 
574  bool hasDSP() const {
575  return getSTI().getFeatureBits()[ARM::FeatureDSP];
576  }
577 
578  bool hasD32() const {
579  return getSTI().getFeatureBits()[ARM::FeatureD32];
580  }
581 
582  bool hasV8_1aOps() const {
583  return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
584  }
585 
586  bool hasRAS() const {
587  return getSTI().getFeatureBits()[ARM::FeatureRAS];
588  }
589 
590  void SwitchMode() {
591  MCSubtargetInfo &STI = copySTI();
592  auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
593  setAvailableFeatures(FB);
594  }
595 
596  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
597 
598  bool isMClass() const {
599  return getSTI().getFeatureBits()[ARM::FeatureMClass];
600  }
601 
602  /// @name Auto-generated Match Functions
603  /// {
604 
605 #define GET_ASSEMBLER_HEADER
606 #include "ARMGenAsmMatcher.inc"
607 
608  /// }
609 
610  OperandMatchResultTy parseITCondCode(OperandVector &);
611  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
612  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
613  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
614  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
615  OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
616  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
617  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
618  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
619  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
620  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
621  int High);
622  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
623  return parsePKHImm(O, "lsl", 0, 31);
624  }
625  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
626  return parsePKHImm(O, "asr", 1, 32);
627  }
628  OperandMatchResultTy parseSetEndImm(OperandVector &);
629  OperandMatchResultTy parseShifterImm(OperandVector &);
630  OperandMatchResultTy parseRotImm(OperandVector &);
631  OperandMatchResultTy parseModImm(OperandVector &);
632  OperandMatchResultTy parseBitfield(OperandVector &);
633  OperandMatchResultTy parsePostIdxReg(OperandVector &);
634  OperandMatchResultTy parseAM3Offset(OperandVector &);
635  OperandMatchResultTy parseFPImm(OperandVector &);
636  OperandMatchResultTy parseVectorList(OperandVector &);
637  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
638  SMLoc &EndLoc);
639 
640  // Asm Match Converter Methods
641  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
642  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
643  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
644 
645  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
646  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
647  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
648  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
649  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
650  bool isITBlockTerminator(MCInst &Inst) const;
651  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
652  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
653  bool Load, bool ARMMode, bool Writeback);
654 
655 public:
656  enum ARMMatchResultTy {
657  Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
658  Match_RequiresNotITBlock,
659  Match_RequiresV6,
660  Match_RequiresThumb2,
661  Match_RequiresV8,
662  Match_RequiresFlagSetting,
663 #define GET_OPERAND_DIAGNOSTIC_TYPES
664 #include "ARMGenAsmMatcher.inc"
665 
666  };
667 
668  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
669  const MCInstrInfo &MII, const MCTargetOptions &Options)
670  : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
672 
673  // Cache the MCRegisterInfo.
674  MRI = getContext().getRegisterInfo();
675 
676  // Initialize the set of available features.
677  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
678 
679  // Add build attributes based on the selected target.
680  if (AddBuildAttributes)
681  getTargetStreamer().emitTargetAttributes(STI);
682 
683  // Not in an ITBlock to start with.
684  ITState.CurPosition = ~0U;
685 
686  VPTState.CurPosition = ~0U;
687 
688  NextSymbolIsThumb = false;
689  }
690 
691  // Implementation of the MCTargetAsmParser interface:
692  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
693  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
694  SMLoc &EndLoc) override;
695  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
696  SMLoc NameLoc, OperandVector &Operands) override;
697  bool ParseDirective(AsmToken DirectiveID) override;
698 
699  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
700  unsigned Kind) override;
701  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
702 
703  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
706  bool MatchingInlineAsm) override;
707  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
708  SmallVectorImpl<NearMissInfo> &NearMisses,
709  bool MatchingInlineAsm, bool &EmitInITBlock,
710  MCStreamer &Out);
711 
712  struct NearMissMessage {
713  SMLoc Loc;
714  SmallString<128> Message;
715  };
716 
717  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
718 
719  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
720  SmallVectorImpl<NearMissMessage> &NearMissesOut,
721  SMLoc IDLoc, OperandVector &Operands);
722  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
724 
725  void doBeforeLabelEmit(MCSymbol *Symbol) override;
726 
727  void onLabelParsed(MCSymbol *Symbol) override;
728 };
729 
730 /// ARMOperand - Instances of this class represent a parsed ARM machine
731 /// operand.
732 class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the payload union below; records which flavour of
  // parsed operand this ARMOperand holds (each accessor asserts against it).
  enum KindTy {
    k_CondCode,
    k_VPTPred,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_TraceSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_RegisterListWithAPSR,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_FPSRegisterListWithVPR,
    k_FPDRegisterListWithVPR,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;
770 
771  SMLoc StartLoc, EndLoc, AlignmentLoc;
773 
774  struct CCOp {
775  ARMCC::CondCodes Val;
776  };
777 
778  struct VCCOp {
779  ARMVCC::VPTCodes Val;
780  };
781 
782  struct CopOp {
783  unsigned Val;
784  };
785 
786  struct CoprocOptionOp {
787  unsigned Val;
788  };
789 
790  struct ITMaskOp {
791  unsigned Mask:4;
792  };
793 
794  struct MBOptOp {
795  ARM_MB::MemBOpt Val;
796  };
797 
798  struct ISBOptOp {
800  };
801 
802  struct TSBOptOp {
804  };
805 
806  struct IFlagsOp {
807  ARM_PROC::IFlags Val;
808  };
809 
810  struct MMaskOp {
811  unsigned Val;
812  };
813 
814  struct BankedRegOp {
815  unsigned Val;
816  };
817 
818  struct TokOp {
819  const char *Data;
820  unsigned Length;
821  };
822 
823  struct RegOp {
824  unsigned RegNum;
825  };
826 
827  // A vector register list is a sequential list of 1 to 4 registers.
828  struct VectorListOp {
829  unsigned RegNum;
830  unsigned Count;
831  unsigned LaneIndex;
832  bool isDoubleSpaced;
833  };
834 
835  struct VectorIndexOp {
836  unsigned Val;
837  };
838 
839  struct ImmOp {
840  const MCExpr *Val;
841  };
842 
843  /// Combined record for all forms of ARM address expressions.
844  struct MemoryOp {
845  unsigned BaseRegNum;
846  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
847  // was specified.
848  const MCExpr *OffsetImm; // Offset immediate value
849  unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
850  ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
851  unsigned ShiftImm; // shift for OffsetReg.
852  unsigned Alignment; // 0 = no alignment specified
853  // n = alignment in bytes (2, 4, 8, 16, or 32)
854  unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
855  };
856 
857  struct PostIdxRegOp {
858  unsigned RegNum;
859  bool isAdd;
860  ARM_AM::ShiftOpc ShiftTy;
861  unsigned ShiftImm;
862  };
863 
864  struct ShifterImmOp {
865  bool isASR;
866  unsigned Imm;
867  };
868 
869  struct RegShiftedRegOp {
870  ARM_AM::ShiftOpc ShiftTy;
871  unsigned SrcReg;
872  unsigned ShiftReg;
873  unsigned ShiftImm;
874  };
875 
876  struct RegShiftedImmOp {
877  ARM_AM::ShiftOpc ShiftTy;
878  unsigned SrcReg;
879  unsigned ShiftImm;
880  };
881 
882  struct RotImmOp {
883  unsigned Imm;
884  };
885 
886  struct ModImmOp {
887  unsigned Bits;
888  unsigned Rot;
889  };
890 
891  struct BitfieldOp {
892  unsigned LSB;
893  unsigned Width;
894  };
895 
896  union {
897  struct CCOp CC;
898  struct VCCOp VCC;
899  struct CopOp Cop;
900  struct CoprocOptionOp CoprocOption;
901  struct MBOptOp MBOpt;
902  struct ISBOptOp ISBOpt;
903  struct TSBOptOp TSBOpt;
904  struct ITMaskOp ITMask;
905  struct IFlagsOp IFlags;
906  struct MMaskOp MMask;
907  struct BankedRegOp BankedReg;
908  struct TokOp Tok;
909  struct RegOp Reg;
910  struct VectorListOp VectorList;
911  struct VectorIndexOp VectorIndex;
912  struct ImmOp Imm;
913  struct MemoryOp Memory;
914  struct PostIdxRegOp PostIdxReg;
915  struct ShifterImmOp ShifterImm;
916  struct RegShiftedRegOp RegShiftedReg;
917  struct RegShiftedImmOp RegShiftedImm;
918  struct RotImmOp RotImm;
919  struct ModImmOp ModImm;
920  struct BitfieldOp Bitfield;
921  };
922 
923 public:
924  ARMOperand(KindTy K) : Kind(K) {}
925 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }

  /// Condition code of a k_CondCode operand.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  /// VPT predication code of a k_VPTPred operand.
  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  /// Coprocessor number or coprocessor register index; both operand kinds
  /// share the same storage.
  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  /// Raw text of a k_Token operand.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  /// Register number of a plain register or CC-out operand.
  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  /// Registers of any of the register-list operand kinds.
  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  /// Expression of an immediate operand; may be a non-constant expression
  /// that is resolved later via a fixup.
  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  /// Expression of a constant-pool immediate operand (shares storage with
  /// ordinary immediates).
  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  /// Lane number of a vector-index operand.
  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  /// Option value of a memory-barrier operand.
  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  /// Option value of an instruction-synchronization-barrier operand.
  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  /// Option value of a trace-synchronization-barrier operand.
  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  /// Interrupt flags of a k_ProcIFlags operand.
  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  /// Encoded mask value of an MSR/MRS special-register operand.
  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  /// Encoded value of a banked-register operand.
  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
1020 
  // Trivial operand-kind queries.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // An IT condition code is stored as an ordinary condition-code operand,
  // so this deliberately tests k_CondCode rather than a separate kind.
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1032 
1033  bool isARMBranchTarget() const {
1034  if (!isImm()) return false;
1035 
1036  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1037  return CE->getValue() % 4 == 0;
1038  return true;
1039  }
1040 
1041 
1042  bool isThumbBranchTarget() const {
1043  if (!isImm()) return false;
1044 
1045  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1046  return CE->getValue() % 2 == 0;
1047  return true;
1048  }
1049 
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are accepted here and resolved later via a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is an offset suitable for the LE /
  // LETP instructions in Arm v8.1M
  bool isLEOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // Only even, negative offsets down to -4094 are accepted.
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;
    }
    return false;
  }

  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. the offset may have 8 bits of magnitude and is represented
  // with two bits of shift. textually it may be either [pc, #imm], #imm or
  // relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // [pc, #imm] form: must have a pure immediate offset off PC.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // Word-aligned offset in [0, 1020].
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }

  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // Accept only constants encodable as a VFP fp32 immediate
    // (ARM_AM::getFP32Imm returns -1 when not encodable).
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
1124 
1125  template<int64_t N, int64_t M>
1126  bool isImmediate() const {
1127  if (!isImm()) return false;
1128  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1129  if (!CE) return false;
1130  int64_t Value = CE->getValue();
1131  return Value >= N && Value <= M;
1132  }
1133 
1134  template<int64_t N, int64_t M>
1135  bool isImmediateS4() const {
1136  if (!isImm()) return false;
1137  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1138  if (!CE) return false;
1139  int64_t Value = CE->getValue();
1140  return ((Value & 3) == 0) && Value >= N && Value <= M;
1141  }
1142  template<int64_t N, int64_t M>
1143  bool isImmediateS2() const {
1144  if (!isImm()) return false;
1145  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1146  if (!CE) return false;
1147  int64_t Value = CE->getValue();
1148  return ((Value & 1) == 0) && Value >= N && Value <= M;
1149  }
  // Concrete immediate-class checks built on the range templates above.
  // The S2/S4 variants additionally require multiples of 2/4.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  // Negated counterpart of isImm0_508s4: accepts constants whose negation
  // is a positive multiple of 4 up to 508.
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  // True iff Value has no set bits below `shift` and fits in 8 bits once
  // shifted right by `shift`.
  template<int shift>
  bool isExpImmValue(uint64_t Value) const {
    uint64_t mask = (1 << shift) - 1;
    if ((Value & mask) != 0 || (Value >> shift) > 0xff)
      return false;
    return true;
  }

  template<int shift>
  bool isExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return isExpImmValue<shift>(CE->getValue());
  }

  // Same as isExpImm, but applied to the value with its low `size` bits
  // inverted.
  template<int shift, int size>
  bool isInvertedExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    uint64_t OriginalValue = CE->getValue();
    uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
    return isExpImmValue<shift>(InvertedValue);
  }

  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1281 
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Either the value itself or its negation must be encodable.
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }

  // Constant that is NOT a T2 modified immediate itself but whose bitwise
  // complement is (so the instruction can be flipped to its MVN/BIC form).
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  // Constant that is NOT a T2 modified immediate itself but whose negation
  // is (so ADD can become SUB, etc.).
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }

  // SETEND takes a single-bit immediate: 0 or 1.
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
1340 
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain register list is also acceptable where APSR is permitted.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // Memory operand whose base is a GPR or MVE Q register and whose offset
  // register, if any, is an MVE Q register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Memory operand whose base and (optional) offset registers are GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register; both registers must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate; the source register must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }

  // Constant immediate that is an exact power of two within [Min, Max].
  template<unsigned Min, unsigned Max>
  bool isPowerTwoInRange() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
           Value >= Min && Value <= Max;
  }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // Constant whose bitwise complement is an ARM modified immediate
  // (enables the MVN/BIC alternate encodings).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // Constant that is NOT a modified immediate itself but whose negation is
  // (enables the ADD/SUB alternate encodings).
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }

  // Constant whose negation lies in [1, 7].
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // Constant whose negation lies in [8, 255].
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }

  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed GPR, possibly with a shift applied.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed GPR with no shift.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // GPR memory operand with no register or immediate offset. When alignOK
  // is false the operand's alignment must equal Alignment exactly.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC.
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC or SP.
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a low (Thumb) GPR.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
1492  bool isMemPCRelImm12() const {
1493  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1494  return false;
1495  // Base register must be PC.
1496  if (Memory.BaseRegNum != ARM::PC)
1497  return false;
1498  // Immediate offset in range [-4095, 4095].
1499  if (!Memory.OffsetImm) return true;
1500  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1501  int64_t Val = CE->getValue();
1502  return (Val > -4096 && Val < 4096) ||
1504  }
1505  return false;
1506  }
1507 
1508  bool isAlignedMemory() const {
1509  return isMemNoOffset(true);
1510  }
1511 
1512  bool isAlignedMemoryNone() const {
1513  return isMemNoOffset(false, 0);
1514  }
1515 
1516  bool isDupAlignedMemoryNone() const {
1517  return isMemNoOffset(false, 0);
1518  }
1519 
1520  bool isAlignedMemory16() const {
1521  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1522  return true;
1523  return isMemNoOffset(false, 0);
1524  }
1525 
1526  bool isDupAlignedMemory16() const {
1527  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1528  return true;
1529  return isMemNoOffset(false, 0);
1530  }
1531 
1532  bool isAlignedMemory32() const {
1533  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1534  return true;
1535  return isMemNoOffset(false, 0);
1536  }
1537 
1538  bool isDupAlignedMemory32() const {
1539  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1540  return true;
1541  return isMemNoOffset(false, 0);
1542  }
1543 
1544  bool isAlignedMemory64() const {
1545  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1546  return true;
1547  return isMemNoOffset(false, 0);
1548  }
1549 
1550  bool isDupAlignedMemory64() const {
1551  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1552  return true;
1553  return isMemNoOffset(false, 0);
1554  }
1555 
1556  bool isAlignedMemory64or128() const {
1557  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1558  return true;
1559  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1560  return true;
1561  return isMemNoOffset(false, 0);
1562  }
1563 
1564  bool isDupAlignedMemory64or128() const {
1565  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1566  return true;
1567  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1568  return true;
1569  return isMemNoOffset(false, 0);
1570  }
1571 
1572  bool isAlignedMemory64or128or256() const {
1573  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1574  return true;
1575  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1576  return true;
1577  if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1578  return true;
1579  return isMemNoOffset(false, 0);
1580  }
1581 
  // ARM addressing mode 2: register offset, or immediate offset in
  // [-4095, 4095].
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }

  // Standalone (post-indexed) AM2 immediate offset.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }
1604 
1605  bool isAddrMode3() const {
1606  // If we have an immediate that's not a constant, treat it as a label
1607  // reference needing a fixup. If it is a constant, it's something else
1608  // and we reject it.
1609  if (isImm() && !isa<MCConstantExpr>(getImm()))
1610  return true;
1611  if (!isGPRMem() || Memory.Alignment != 0) return false;
1612  // No shifts are legal for AM3.
1613  if (Memory.ShiftType != ARM_AM::no_shift) return false;
1614  // Check for register offset.
1615  if (Memory.OffsetRegNum) return true;
1616  // Immediate offset in range [-255, 255].
1617  if (!Memory.OffsetImm) return true;
1618  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1619  int64_t Val = CE->getValue();
1620  // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1621  // we have to check for this too.
1622  return (Val > -256 && Val < 256) ||
1624  }
1625  return false;
1626  }
1627 
1628  bool isAM3Offset() const {
1629  if (isPostIdxReg())
1630  return true;
1631  if (!isImm())
1632  return false;
1633  // Immediate offset in range [-255, 255].
1634  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1635  if (!CE) return false;
1636  int64_t Val = CE->getValue();
1637  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1638  return (Val > -256 && Val < 256) ||
1640  }
1641 
1642  bool isAddrMode5() const {
1643  // If we have an immediate that's not a constant, treat it as a label
1644  // reference needing a fixup. If it is a constant, it's something else
1645  // and we reject it.
1646  if (isImm() && !isa<MCConstantExpr>(getImm()))
1647  return true;
1648  if (!isGPRMem() || Memory.Alignment != 0) return false;
1649  // Check for register offset.
1650  if (Memory.OffsetRegNum) return false;
1651  // Immediate offset in range [-1020, 1020] and a multiple of 4.
1652  if (!Memory.OffsetImm) return true;
1653  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1654  int64_t Val = CE->getValue();
1655  return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1657  }
1658  return false;
1659  }
1660 
1661  bool isAddrMode5FP16() const {
1662  // If we have an immediate that's not a constant, treat it as a label
1663  // reference needing a fixup. If it is a constant, it's something else
1664  // and we reject it.
1665  if (isImm() && !isa<MCConstantExpr>(getImm()))
1666  return true;
1667  if (!isGPRMem() || Memory.Alignment != 0) return false;
1668  // Check for register offset.
1669  if (Memory.OffsetRegNum) return false;
1670  // Immediate offset in range [-510, 510] and a multiple of 2.
1671  if (!Memory.OffsetImm) return true;
1672  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1673  int64_t Val = CE->getValue();
1674  return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1676  }
1677  return false;
1678  }
1679 
1680  bool isMemTBB() const {
1681  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1682  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1683  return false;
1684  return true;
1685  }
1686 
1687  bool isMemTBH() const {
1688  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1689  Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1690  Memory.Alignment != 0 )
1691  return false;
1692  return true;
1693  }
1694 
1695  bool isMemRegOffset() const {
1696  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1697  return false;
1698  return true;
1699  }
1700 
1701  bool isT2MemRegOffset() const {
1702  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1703  Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1704  return false;
1705  // Only lsl #{0, 1, 2, 3} allowed.
1706  if (Memory.ShiftType == ARM_AM::no_shift)
1707  return true;
1708  if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1709  return false;
1710  return true;
1711  }
1712 
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low (r0-r7).
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }

  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }

  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }

  // SP-relative Thumb memory operand.
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1774 
1775  bool isMemImm8s4Offset() const {
1776  // If we have an immediate that's not a constant, treat it as a label
1777  // reference needing a fixup. If it is a constant, it's something else
1778  // and we reject it.
1779  if (isImm() && !isa<MCConstantExpr>(getImm()))
1780  return true;
1781  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1782  return false;
1783  // Immediate offset a multiple of 4 in range [-1020, 1020].
1784  if (!Memory.OffsetImm) return true;
1785  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1786  int64_t Val = CE->getValue();
1787  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1788  return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1790  }
1791  return false;
1792  }
1793 
  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is INT32_MIN.
      return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
    }
    return false;
  }

  // Immediate-offset memory operand, a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }

  // Immediate-offset memory operand in [-255, 255], non-PC base. INT32_MIN
  // encodes #-0.
  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }

  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    return false;
  }
1871 
  // MVE memory operand: GPR (non-PC) base with an MVE Q-register offset,
  // optionally scaled by uxtw #shift (shift == 0 means no shift allowed).
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }

  // MVE memory operand: Q-register base with an immediate offset that is a
  // multiple of (1 << shift) within the 7-bit scaled range. INT32_MIN
  // encodes #-0.
  template <int shift> bool isMemRegQOffset() const {
    if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    if (!Memory.OffsetImm)
      return true;
    static_assert(shift < 56,
                  "Such that we dont shift by a value higher than 62");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // The value must be a multiple of (1 << shift)
      if ((Val & ((1U << shift) - 1)) != 0)
        return false;

      // And be in the right range, depending on the amount that it is shifted
      // by.  Shift 0, is equal to 7 unsigned bits, the sign bit is set
      // separately.
      int64_t Range = (1U << (7 + shift)) - 1;
      return (Val == INT32_MIN) || (Val > -Range && Val < Range);
    }
    return false;
  }
1920 
1921  bool isMemPosImm8Offset() const {
1922  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1923  return false;
1924  // Immediate offset in range [0, 255].
1925  if (!Memory.OffsetImm) return true;
1926  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1927  int64_t Val = CE->getValue();
1928  return Val >= 0 && Val < 256;
1929  }
1930  return false;
1931  }
1932 
1933  bool isMemNegImm8Offset() const {
1934  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1935  return false;
1936  // Base reg of PC isn't allowed for these encodings.
1937  if (Memory.BaseRegNum == ARM::PC) return false;
1938  // Immediate offset in range [-255, -1].
1939  if (!Memory.OffsetImm) return false;
1940  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1941  int64_t Val = CE->getValue();
1942  return (Val == std::numeric_limits<int32_t>::min()) ||
1943  (Val > -256 && Val < 0);
1944  }
1945  return false;
1946  }
1947 
1948  bool isMemUImm12Offset() const {
1949  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1950  return false;
1951  // Immediate offset in range [0, 4095].
1952  if (!Memory.OffsetImm) return true;
1953  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1954  int64_t Val = CE->getValue();
1955  return (Val >= 0 && Val < 4096);
1956  }
1957  return false;
1958  }
1959 
1960  bool isMemImm12Offset() const {
1961  // If we have an immediate that's not a constant, treat it as a label
1962  // reference needing a fixup. If it is a constant, it's something else
1963  // and we reject it.
1964 
1965  if (isImm() && !isa<MCConstantExpr>(getImm()))
1966  return true;
1967 
1968  if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1969  return false;
1970  // Immediate offset in range [-4095, 4095].
1971  if (!Memory.OffsetImm) return true;
1972  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1973  int64_t Val = CE->getValue();
1974  return (Val > -4096 && Val < 4096) ||
1976  }
1977  // If we have an immediate that's not a constant, treat it as a
1978  // symbolic expression needing a fixup.
1979  return true;
1980  }
1981 
1982  bool isConstPoolAsmImm() const {
1983  // Delay processing of Constant Pool Immediate, this will turn into
1984  // a constant. Match no other operand
1985  return (isConstantPoolImm());
1986  }
1987 
1988  bool isPostIdxImm8() const {
1989  if (!isImm()) return false;
1990  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1991  if (!CE) return false;
1992  int64_t Val = CE->getValue();
1993  return (Val > -256 && Val < 256) ||
1995  }
1996 
1997  bool isPostIdxImm8s4() const {
1998  if (!isImm()) return false;
1999  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2000  if (!CE) return false;
2001  int64_t Val = CE->getValue();
2002  return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2004  }
2005 
2006  bool isMSRMask() const { return Kind == k_MSRMask; }
2007  bool isBankedReg() const { return Kind == k_BankedReg; }
2008  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2009 
2010  // NEON operands.
2011  bool isSingleSpacedVectorList() const {
2012  return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2013  }
2014 
2015  bool isDoubleSpacedVectorList() const {
2016  return Kind == k_VectorList && VectorList.isDoubleSpaced;
2017  }
2018 
2019  bool isVecListOneD() const {
2020  if (!isSingleSpacedVectorList()) return false;
2021  return VectorList.Count == 1;
2022  }
2023 
2024  bool isVecListTwoMQ() const {
2025  return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2026  ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2027  VectorList.RegNum);
2028  }
2029 
2030  bool isVecListDPair() const {
2031  if (!isSingleSpacedVectorList()) return false;
2032  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2033  .contains(VectorList.RegNum));
2034  }
2035 
2036  bool isVecListThreeD() const {
2037  if (!isSingleSpacedVectorList()) return false;
2038  return VectorList.Count == 3;
2039  }
2040 
2041  bool isVecListFourD() const {
2042  if (!isSingleSpacedVectorList()) return false;
2043  return VectorList.Count == 4;
2044  }
2045 
2046  bool isVecListDPairSpaced() const {
2047  if (Kind != k_VectorList) return false;
2048  if (isSingleSpacedVectorList()) return false;
2049  return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2050  .contains(VectorList.RegNum));
2051  }
2052 
2053  bool isVecListThreeQ() const {
2054  if (!isDoubleSpacedVectorList()) return false;
2055  return VectorList.Count == 3;
2056  }
2057 
2058  bool isVecListFourQ() const {
2059  if (!isDoubleSpacedVectorList()) return false;
2060  return VectorList.Count == 4;
2061  }
2062 
2063  bool isVecListFourMQ() const {
2064  return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2065  ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2066  VectorList.RegNum);
2067  }
2068 
2069  bool isSingleSpacedVectorAllLanes() const {
2070  return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2071  }
2072 
2073  bool isDoubleSpacedVectorAllLanes() const {
2074  return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2075  }
2076 
2077  bool isVecListOneDAllLanes() const {
2078  if (!isSingleSpacedVectorAllLanes()) return false;
2079  return VectorList.Count == 1;
2080  }
2081 
2082  bool isVecListDPairAllLanes() const {
2083  if (!isSingleSpacedVectorAllLanes()) return false;
2084  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2085  .contains(VectorList.RegNum));
2086  }
2087 
2088  bool isVecListDPairSpacedAllLanes() const {
2089  if (!isDoubleSpacedVectorAllLanes()) return false;
2090  return VectorList.Count == 2;
2091  }
2092 
2093  bool isVecListThreeDAllLanes() const {
2094  if (!isSingleSpacedVectorAllLanes()) return false;
2095  return VectorList.Count == 3;
2096  }
2097 
2098  bool isVecListThreeQAllLanes() const {
2099  if (!isDoubleSpacedVectorAllLanes()) return false;
2100  return VectorList.Count == 3;
2101  }
2102 
2103  bool isVecListFourDAllLanes() const {
2104  if (!isSingleSpacedVectorAllLanes()) return false;
2105  return VectorList.Count == 4;
2106  }
2107 
2108  bool isVecListFourQAllLanes() const {
2109  if (!isDoubleSpacedVectorAllLanes()) return false;
2110  return VectorList.Count == 4;
2111  }
2112 
2113  bool isSingleSpacedVectorIndexed() const {
2114  return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2115  }
2116 
2117  bool isDoubleSpacedVectorIndexed() const {
2118  return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2119  }
2120 
2121  bool isVecListOneDByteIndexed() const {
2122  if (!isSingleSpacedVectorIndexed()) return false;
2123  return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2124  }
2125 
2126  bool isVecListOneDHWordIndexed() const {
2127  if (!isSingleSpacedVectorIndexed()) return false;
2128  return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2129  }
2130 
2131  bool isVecListOneDWordIndexed() const {
2132  if (!isSingleSpacedVectorIndexed()) return false;
2133  return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2134  }
2135 
2136  bool isVecListTwoDByteIndexed() const {
2137  if (!isSingleSpacedVectorIndexed()) return false;
2138  return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2139  }
2140 
2141  bool isVecListTwoDHWordIndexed() const {
2142  if (!isSingleSpacedVectorIndexed()) return false;
2143  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2144  }
2145 
2146  bool isVecListTwoQWordIndexed() const {
2147  if (!isDoubleSpacedVectorIndexed()) return false;
2148  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2149  }
2150 
2151  bool isVecListTwoQHWordIndexed() const {
2152  if (!isDoubleSpacedVectorIndexed()) return false;
2153  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2154  }
2155 
2156  bool isVecListTwoDWordIndexed() const {
2157  if (!isSingleSpacedVectorIndexed()) return false;
2158  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2159  }
2160 
2161  bool isVecListThreeDByteIndexed() const {
2162  if (!isSingleSpacedVectorIndexed()) return false;
2163  return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2164  }
2165 
2166  bool isVecListThreeDHWordIndexed() const {
2167  if (!isSingleSpacedVectorIndexed()) return false;
2168  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2169  }
2170 
2171  bool isVecListThreeQWordIndexed() const {
2172  if (!isDoubleSpacedVectorIndexed()) return false;
2173  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2174  }
2175 
2176  bool isVecListThreeQHWordIndexed() const {
2177  if (!isDoubleSpacedVectorIndexed()) return false;
2178  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2179  }
2180 
2181  bool isVecListThreeDWordIndexed() const {
2182  if (!isSingleSpacedVectorIndexed()) return false;
2183  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2184  }
2185 
2186  bool isVecListFourDByteIndexed() const {
2187  if (!isSingleSpacedVectorIndexed()) return false;
2188  return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2189  }
2190 
2191  bool isVecListFourDHWordIndexed() const {
2192  if (!isSingleSpacedVectorIndexed()) return false;
2193  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2194  }
2195 
2196  bool isVecListFourQWordIndexed() const {
2197  if (!isDoubleSpacedVectorIndexed()) return false;
2198  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2199  }
2200 
2201  bool isVecListFourQHWordIndexed() const {
2202  if (!isDoubleSpacedVectorIndexed()) return false;
2203  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2204  }
2205 
2206  bool isVecListFourDWordIndexed() const {
2207  if (!isSingleSpacedVectorIndexed()) return false;
2208  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2209  }
2210 
2211  bool isVectorIndex() const { return Kind == k_VectorIndex; }
2212 
2213  template <unsigned NumLanes>
2214  bool isVectorIndexInRange() const {
2215  if (Kind != k_VectorIndex) return false;
2216  return VectorIndex.Val < NumLanes;
2217  }
2218 
2219  bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
2220  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
2221  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
2222  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2223 
2224  template<int PermittedValue, int OtherPermittedValue>
2225  bool isMVEPairVectorIndex() const {
2226  if (Kind != k_VectorIndex) return false;
2227  return VectorIndex.Val == PermittedValue ||
2228  VectorIndex.Val == OtherPermittedValue;
2229  }
2230 
2231  bool isNEONi8splat() const {
2232  if (!isImm()) return false;
2233  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2234  // Must be a constant.
2235  if (!CE) return false;
2236  int64_t Value = CE->getValue();
2237  // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2238  // value.
2239  return Value >= 0 && Value < 256;
2240  }
2241 
2242  bool isNEONi16splat() const {
2243  if (isNEONByteReplicate(2))
2244  return false; // Leave that for bytes replication and forbid by default.
2245  if (!isImm())
2246  return false;
2247  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2248  // Must be a constant.
2249  if (!CE) return false;
2250  unsigned Value = CE->getValue();
2251  return ARM_AM::isNEONi16splat(Value);
2252  }
2253 
2254  bool isNEONi16splatNot() const {
2255  if (!isImm())
2256  return false;
2257  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2258  // Must be a constant.
2259  if (!CE) return false;
2260  unsigned Value = CE->getValue();
2261  return ARM_AM::isNEONi16splat(~Value & 0xffff);
2262  }
2263 
2264  bool isNEONi32splat() const {
2265  if (isNEONByteReplicate(4))
2266  return false; // Leave that for bytes replication and forbid by default.
2267  if (!isImm())
2268  return false;
2269  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2270  // Must be a constant.
2271  if (!CE) return false;
2272  unsigned Value = CE->getValue();
2273  return ARM_AM::isNEONi32splat(Value);
2274  }
2275 
2276  bool isNEONi32splatNot() const {
2277  if (!isImm())
2278  return false;
2279  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2280  // Must be a constant.
2281  if (!CE) return false;
2282  unsigned Value = CE->getValue();
2283  return ARM_AM::isNEONi32splat(~Value);
2284  }
2285 
2286  static bool isValidNEONi32vmovImm(int64_t Value) {
2287  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2288  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2289  return ((Value & 0xffffffffffffff00) == 0) ||
2290  ((Value & 0xffffffffffff00ff) == 0) ||
2291  ((Value & 0xffffffffff00ffff) == 0) ||
2292  ((Value & 0xffffffff00ffffff) == 0) ||
2293  ((Value & 0xffffffffffff00ff) == 0xff) ||
2294  ((Value & 0xffffffffff00ffff) == 0xffff);
2295  }
2296 
2297  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2298  assert((Width == 8 || Width == 16 || Width == 32) &&
2299  "Invalid element width");
2300  assert(NumElems * Width <= 64 && "Invalid result width");
2301 
2302  if (!isImm())
2303  return false;
2304  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2305  // Must be a constant.
2306  if (!CE)
2307  return false;
2308  int64_t Value = CE->getValue();
2309  if (!Value)
2310  return false; // Don't bother with zero.
2311  if (Inv)
2312  Value = ~Value;
2313 
2314  uint64_t Mask = (1ull << Width) - 1;
2315  uint64_t Elem = Value & Mask;
2316  if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2317  return false;
2318  if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2319  return false;
2320 
2321  for (unsigned i = 1; i < NumElems; ++i) {
2322  Value >>= Width;
2323  if ((Value & Mask) != Elem)
2324  return false;
2325  }
2326  return true;
2327  }
2328 
2329  bool isNEONByteReplicate(unsigned NumBytes) const {
2330  return isNEONReplicate(8, NumBytes, false);
2331  }
2332 
2333  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2334  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2335  "Invalid source width");
2336  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2337  "Invalid destination width");
2338  assert(FromW < ToW && "ToW is not less than FromW");
2339  }
2340 
2341  template<unsigned FromW, unsigned ToW>
2342  bool isNEONmovReplicate() const {
2343  checkNeonReplicateArgs(FromW, ToW);
2344  if (ToW == 64 && isNEONi64splat())
2345  return false;
2346  return isNEONReplicate(FromW, ToW / FromW, false);
2347  }
2348 
2349  template<unsigned FromW, unsigned ToW>
2350  bool isNEONinvReplicate() const {
2351  checkNeonReplicateArgs(FromW, ToW);
2352  return isNEONReplicate(FromW, ToW / FromW, true);
2353  }
2354 
2355  bool isNEONi32vmov() const {
2356  if (isNEONByteReplicate(4))
2357  return false; // Let it to be classified as byte-replicate case.
2358  if (!isImm())
2359  return false;
2360  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2361  // Must be a constant.
2362  if (!CE)
2363  return false;
2364  return isValidNEONi32vmovImm(CE->getValue());
2365  }
2366 
2367  bool isNEONi32vmovNeg() const {
2368  if (!isImm()) return false;
2369  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2370  // Must be a constant.
2371  if (!CE) return false;
2372  return isValidNEONi32vmovImm(~CE->getValue());
2373  }
2374 
2375  bool isNEONi64splat() const {
2376  if (!isImm()) return false;
2377  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2378  // Must be a constant.
2379  if (!CE) return false;
2380  uint64_t Value = CE->getValue();
2381  // i64 value with each byte being either 0 or 0xff.
2382  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2383  if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2384  return true;
2385  }
2386 
2387  template<int64_t Angle, int64_t Remainder>
2388  bool isComplexRotation() const {
2389  if (!isImm()) return false;
2390 
2391  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2392  if (!CE) return false;
2393  uint64_t Value = CE->getValue();
2394 
2395  return (Value % Angle == Remainder && Value <= 270);
2396  }
2397 
2398  bool isMVELongShift() const {
2399  if (!isImm()) return false;
2400  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2401  // Must be a constant.
2402  if (!CE) return false;
2403  uint64_t Value = CE->getValue();
2404  return Value >= 1 && Value <= 32;
2405  }
2406 
2407  bool isMveSaturateOp() const {
2408  if (!isImm()) return false;
2409  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2410  if (!CE) return false;
2411  uint64_t Value = CE->getValue();
2412  return Value == 48 || Value == 64;
2413  }
2414 
2415  bool isITCondCodeNoAL() const {
2416  if (!isITCondCode()) return false;
2418  return CC != ARMCC::AL;
2419  }
2420 
2421  bool isITCondCodeRestrictedI() const {
2422  if (!isITCondCode())
2423  return false;
2425  return CC == ARMCC::EQ || CC == ARMCC::NE;
2426  }
2427 
2428  bool isITCondCodeRestrictedS() const {
2429  if (!isITCondCode())
2430  return false;
2432  return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2433  CC == ARMCC::GE;
2434  }
2435 
2436  bool isITCondCodeRestrictedU() const {
2437  if (!isITCondCode())
2438  return false;
2440  return CC == ARMCC::HS || CC == ARMCC::HI;
2441  }
2442 
2443  bool isITCondCodeRestrictedFP() const {
2444  if (!isITCondCode())
2445  return false;
2447  return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2448  CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2449  }
2450 
2451  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2452  // Add as immediates when possible. Null MCExpr = 0.
2453  if (!Expr)
2455  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2456  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2457  else
2458  Inst.addOperand(MCOperand::createExpr(Expr));
2459  }
2460 
2461  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2462  assert(N == 1 && "Invalid number of operands!");
2463  addExpr(Inst, getImm());
2464  }
2465 
2466  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2467  assert(N == 1 && "Invalid number of operands!");
2468  addExpr(Inst, getImm());
2469  }
2470 
2471  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2472  assert(N == 2 && "Invalid number of operands!");
2473  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2474  unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2475  Inst.addOperand(MCOperand::createReg(RegNum));
2476  }
2477 
2478  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2479  assert(N == 3 && "Invalid number of operands!");
2480  Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2481  unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2482  Inst.addOperand(MCOperand::createReg(RegNum));
2484  }
2485 
2486  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2487  assert(N == 4 && "Invalid number of operands!");
2488  addVPTPredNOperands(Inst, N-1);
2489  unsigned RegNum;
2490  if (getVPTPred() == ARMVCC::None) {
2491  RegNum = 0;
2492  } else {
2493  unsigned NextOpIndex = Inst.getNumOperands();
2494  const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
2495  int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2496  assert(TiedOp >= 0 &&
2497  "Inactive register in vpred_r is not tied to an output!");
2498  RegNum = Inst.getOperand(TiedOp).getReg();
2499  }
2500  Inst.addOperand(MCOperand::createReg(RegNum));
2501  }
2502 
2503  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2504  assert(N == 1 && "Invalid number of operands!");
2505  Inst.addOperand(MCOperand::createImm(getCoproc()));
2506  }
2507 
2508  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2509  assert(N == 1 && "Invalid number of operands!");
2510  Inst.addOperand(MCOperand::createImm(getCoproc()));
2511  }
2512 
2513  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2514  assert(N == 1 && "Invalid number of operands!");
2515  Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2516  }
2517 
2518  void addITMaskOperands(MCInst &Inst, unsigned N) const {
2519  assert(N == 1 && "Invalid number of operands!");
2520  Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2521  }
2522 
2523  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2524  assert(N == 1 && "Invalid number of operands!");
2525  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2526  }
2527 
2528  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2529  assert(N == 1 && "Invalid number of operands!");
2531  }
2532 
2533  void addCCOutOperands(MCInst &Inst, unsigned N) const {
2534  assert(N == 1 && "Invalid number of operands!");
2536  }
2537 
2538  void addRegOperands(MCInst &Inst, unsigned N) const {
2539  assert(N == 1 && "Invalid number of operands!");
2541  }
2542 
2543  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2544  assert(N == 3 && "Invalid number of operands!");
2545  assert(isRegShiftedReg() &&
2546  "addRegShiftedRegOperands() on non-RegShiftedReg!");
2547  Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2548  Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2550  ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2551  }
2552 
2553  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2554  assert(N == 2 && "Invalid number of operands!");
2555  assert(isRegShiftedImm() &&
2556  "addRegShiftedImmOperands() on non-RegShiftedImm!");
2557  Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2558  // Shift of #32 is encoded as 0 where permitted
2559  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2561  ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2562  }
2563 
2564  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2565  assert(N == 1 && "Invalid number of operands!");
2566  Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2567  ShifterImm.Imm));
2568  }
2569 
2570  void addRegListOperands(MCInst &Inst, unsigned N) const {
2571  assert(N == 1 && "Invalid number of operands!");
2572  const SmallVectorImpl<unsigned> &RegList = getRegList();
2573  for (unsigned Reg : RegList)
2575  }
2576 
2577  void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2578  assert(N == 1 && "Invalid number of operands!");
2579  const SmallVectorImpl<unsigned> &RegList = getRegList();
2580  for (unsigned Reg : RegList)
2582  }
2583 
2584  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2585  addRegListOperands(Inst, N);
2586  }
2587 
2588  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2589  addRegListOperands(Inst, N);
2590  }
2591 
2592  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2593  addRegListOperands(Inst, N);
2594  }
2595 
2596  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2597  addRegListOperands(Inst, N);
2598  }
2599 
2600  void addRotImmOperands(MCInst &Inst, unsigned N) const {
2601  assert(N == 1 && "Invalid number of operands!");
2602  // Encoded as val>>3. The printer handles display as 8, 16, 24.
2603  Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2604  }
2605 
2606  void addModImmOperands(MCInst &Inst, unsigned N) const {
2607  assert(N == 1 && "Invalid number of operands!");
2608 
2609  // Support for fixups (MCFixup)
2610  if (isImm())
2611  return addImmOperands(Inst, N);
2612 
2613  Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2614  }
2615 
2616  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2617  assert(N == 1 && "Invalid number of operands!");
2618  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2619  uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2620  Inst.addOperand(MCOperand::createImm(Enc));
2621  }
2622 
2623  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2624  assert(N == 1 && "Invalid number of operands!");
2625  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2626  uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2627  Inst.addOperand(MCOperand::createImm(Enc));
2628  }
2629 
2630  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2631  assert(N == 1 && "Invalid number of operands!");
2632  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2633  uint32_t Val = -CE->getValue();
2634  Inst.addOperand(MCOperand::createImm(Val));
2635  }
2636 
2637  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2638  assert(N == 1 && "Invalid number of operands!");
2639  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2640  uint32_t Val = -CE->getValue();
2641  Inst.addOperand(MCOperand::createImm(Val));
2642  }
2643 
2644  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2645  assert(N == 1 && "Invalid number of operands!");
2646  // Munge the lsb/width into a bitfield mask.
2647  unsigned lsb = Bitfield.LSB;
2648  unsigned width = Bitfield.Width;
2649  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2650  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2651  (32 - (lsb + width)));
2653  }
2654 
2655  void addImmOperands(MCInst &Inst, unsigned N) const {
2656  assert(N == 1 && "Invalid number of operands!");
2657  addExpr(Inst, getImm());
2658  }
2659 
2660  void addFBits16Operands(MCInst &Inst, unsigned N) const {
2661  assert(N == 1 && "Invalid number of operands!");
2662  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2663  Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2664  }
2665 
2666  void addFBits32Operands(MCInst &Inst, unsigned N) const {
2667  assert(N == 1 && "Invalid number of operands!");
2668  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2669  Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2670  }
2671 
2672  void addFPImmOperands(MCInst &Inst, unsigned N) const {
2673  assert(N == 1 && "Invalid number of operands!");
2674  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2675  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2676  Inst.addOperand(MCOperand::createImm(Val));
2677  }
2678 
2679  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2680  assert(N == 1 && "Invalid number of operands!");
2681  // FIXME: We really want to scale the value here, but the LDRD/STRD
2682  // instruction don't encode operands that way yet.
2683  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2684  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2685  }
2686 
2687  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2688  assert(N == 1 && "Invalid number of operands!");
2689  // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2690  // instruction don't encode operands that way yet.
2691  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2692  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2693  }
2694 
2695  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2696  assert(N == 1 && "Invalid number of operands!");
2697  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2698  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2699  }
2700 
2701  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2702  assert(N == 1 && "Invalid number of operands!");
2703  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2704  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2705  }
2706 
2707  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2708  assert(N == 1 && "Invalid number of operands!");
2709  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2710  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2711  }
2712 
2713  void addImm7Operands(MCInst &Inst, unsigned N) const {
2714  assert(N == 1 && "Invalid number of operands!");
2715  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2716  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2717  }
2718 
2719  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2720  assert(N == 1 && "Invalid number of operands!");
2721  // The immediate is scaled by four in the encoding and is stored
2722  // in the MCInst as such. Lop off the low two bits here.
2723  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2724  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2725  }
2726 
2727  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2728  assert(N == 1 && "Invalid number of operands!");
2729  // The immediate is scaled by four in the encoding and is stored
2730  // in the MCInst as such. Lop off the low two bits here.
2731  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2732  Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2733  }
2734 
2735  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2736  assert(N == 1 && "Invalid number of operands!");
2737  // The immediate is scaled by four in the encoding and is stored
2738  // in the MCInst as such. Lop off the low two bits here.
2739  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2740  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2741  }
2742 
2743  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2744  assert(N == 1 && "Invalid number of operands!");
2745  // The constant encodes as the immediate-1, and we store in the instruction
2746  // the bits as encoded, so subtract off one here.
2747  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2748  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2749  }
2750 
2751  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2752  assert(N == 1 && "Invalid number of operands!");
2753  // The constant encodes as the immediate-1, and we store in the instruction
2754  // the bits as encoded, so subtract off one here.
2755  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2756  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2757  }
2758 
2759  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2760  assert(N == 1 && "Invalid number of operands!");
2761  // The constant encodes as the immediate, except for 32, which encodes as
2762  // zero.
2763  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2764  unsigned Imm = CE->getValue();
2765  Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2766  }
2767 
2768  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2769  assert(N == 1 && "Invalid number of operands!");
2770  // An ASR value of 32 encodes as 0, so that's how we want to add it to
2771  // the instruction as well.
2772  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2773  int Val = CE->getValue();
2774  Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2775  }
2776 
  // Add a t2_so_imm whose bitwise NOT appeared in the assembly source.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  // Add a t2_so_imm whose arithmetic negation appeared in the source.
  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  // Add an imm0_4095 whose arithmetic negation appeared in the source.
  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2800 
2801  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2802  if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2803  Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2804  return;
2805  }
2806  const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2808  }
2809 
2810  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2811  assert(N == 1 && "Invalid number of operands!");
2812  if (isImm()) {
2813  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2814  if (CE) {
2815  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2816  return;
2817  }
2818  const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2820  return;
2821  }
2822 
2823  assert(isGPRMem() && "Unknown value type!");
2824  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2825  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2826  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2827  else
2828  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2829  }
2830 
  // Add a memory-barrier option (e.g. sy, ish) as its numeric encoding.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }

  // Add an instruction-synchronization-barrier (ISB) option encoding.
  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }

  // Add a trace-synchronization-barrier (TSB) option encoding.
  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
  }

  // The four MemNoOffset* adders below all emit just the base register; the
  // variants differ only in which register classes the matcher accepted.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  // Add a PC-relative 12-bit immediate memory offset; a non-constant
  // offset expression is emitted symbolically for later fixup.
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
2873 
2874  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2875  assert(N == 1 && "Invalid number of operands!");
2876  assert(isImm() && "Not an immediate!");
2877 
2878  // If we have an immediate that's not a constant, treat it as a label
2879  // reference needing a fixup.
2880  if (!isa<MCConstantExpr>(getImm())) {
2881  Inst.addOperand(MCOperand::createExpr(getImm()));
2882  return;
2883  }
2884 
2885  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2886  int Val = CE->getValue();
2887  Inst.addOperand(MCOperand::createImm(Val));
2888  }
2889 
  // Add an aligned memory operand: base register plus alignment in bytes.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.Alignment));
  }

  // The adders below all share the encoding of addAlignedMemoryOperands;
  // the distinct names exist so the generated matcher can enforce
  // per-instruction alignment constraints.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2939 
2940  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2941  assert(N == 3 && "Invalid number of operands!");
2942  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2943  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2944  if (!Memory.OffsetRegNum) {
2945  if (!Memory.OffsetImm)
2947  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2948  int32_t Val = CE->getValue();
2950  // Special case for #-0
2951  if (Val == std::numeric_limits<int32_t>::min())
2952  Val = 0;
2953  if (Val < 0)
2954  Val = -Val;
2955  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2956  Inst.addOperand(MCOperand::createImm(Val));
2957  } else
2958  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2959  } else {
2960  // For register offset, we encode the shift type and negation flag
2961  // here.
2962  int32_t Val =
2964  Memory.ShiftImm, Memory.ShiftType);
2965  Inst.addOperand(MCOperand::createImm(Val));
2966  }
2967  }
2968 
2969  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2970  assert(N == 2 && "Invalid number of operands!");
2971  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2972  assert(CE && "non-constant AM2OffsetImm operand!");
2973  int32_t Val = CE->getValue();
2975  // Special case for #-0
2976  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2977  if (Val < 0) Val = -Val;
2978  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2980  Inst.addOperand(MCOperand::createImm(Val));
2981  }
2982 
2983  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2984  assert(N == 3 && "Invalid number of operands!");
2985  // If we have an immediate that's not a constant, treat it as a label
2986  // reference needing a fixup. If it is a constant, it's something else
2987  // and we reject it.
2988  if (isImm()) {
2989  Inst.addOperand(MCOperand::createExpr(getImm()));
2992  return;
2993  }
2994 
2995  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2996  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2997  if (!Memory.OffsetRegNum) {
2998  if (!Memory.OffsetImm)
3000  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3001  int32_t Val = CE->getValue();
3003  // Special case for #-0
3004  if (Val == std::numeric_limits<int32_t>::min())
3005  Val = 0;
3006  if (Val < 0)
3007  Val = -Val;
3008  Val = ARM_AM::getAM3Opc(AddSub, Val);
3009  Inst.addOperand(MCOperand::createImm(Val));
3010  } else
3011  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3012  } else {
3013  // For register offset, we encode the shift type and negation flag
3014  // here.
3015  int32_t Val =
3016  ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3017  Inst.addOperand(MCOperand::createImm(Val));
3018  }
3019  }
3020 
3021  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3022  assert(N == 2 && "Invalid number of operands!");
3023  if (Kind == k_PostIndexRegister) {
3024  int32_t Val =
3025  ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3026  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3027  Inst.addOperand(MCOperand::createImm(Val));
3028  return;
3029  }
3030 
3031  // Constant offset.
3032  const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3033  int32_t Val = CE->getValue();
3035  // Special case for #-0
3036  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3037  if (Val < 0) Val = -Val;
3038  Val = ARM_AM::getAM3Opc(AddSub, Val);
3040  Inst.addOperand(MCOperand::createImm(Val));
3041  }
3042 
3043  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3044  assert(N == 2 && "Invalid number of operands!");
3045  // If we have an immediate that's not a constant, treat it as a label
3046  // reference needing a fixup. If it is a constant, it's something else
3047  // and we reject it.
3048  if (isImm()) {
3049  Inst.addOperand(MCOperand::createExpr(getImm()));
3051  return;
3052  }
3053 
3054  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3055  if (!Memory.OffsetImm)
3057  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3058  // The lower two bits are always zero and as such are not encoded.
3059  int32_t Val = CE->getValue() / 4;
3061  // Special case for #-0
3062  if (Val == std::numeric_limits<int32_t>::min())
3063  Val = 0;
3064  if (Val < 0)
3065  Val = -Val;
3066  Val = ARM_AM::getAM5Opc(AddSub, Val);
3067  Inst.addOperand(MCOperand::createImm(Val));
3068  } else
3069  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3070  }
3071 
3072  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3073  assert(N == 2 && "Invalid number of operands!");
3074  // If we have an immediate that's not a constant, treat it as a label
3075  // reference needing a fixup. If it is a constant, it's something else
3076  // and we reject it.
3077  if (isImm()) {
3078  Inst.addOperand(MCOperand::createExpr(getImm()));
3080  return;
3081  }
3082 
3083  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3084  // The lower bit is always zero and as such is not encoded.
3085  if (!Memory.OffsetImm)
3087  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3088  int32_t Val = CE->getValue() / 2;
3090  // Special case for #-0
3091  if (Val == std::numeric_limits<int32_t>::min())
3092  Val = 0;
3093  if (Val < 0)
3094  Val = -Val;
3095  Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3096  Inst.addOperand(MCOperand::createImm(Val));
3097  } else
3098  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3099  }
3100 
3101  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3102  assert(N == 2 && "Invalid number of operands!");
3103  // If we have an immediate that's not a constant, treat it as a label
3104  // reference needing a fixup. If it is a constant, it's something else
3105  // and we reject it.
3106  if (isImm()) {
3107  Inst.addOperand(MCOperand::createExpr(getImm()));
3109  return;
3110  }
3111 
3112  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3113  addExpr(Inst, Memory.OffsetImm);
3114  }
3115 
3116  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3117  assert(N == 2 && "Invalid number of operands!");
3118  // If we have an immediate that's not a constant, treat it as a label
3119  // reference needing a fixup. If it is a constant, it's something else
3120  // and we reject it.
3121  if (isImm()) {
3122  Inst.addOperand(MCOperand::createExpr(getImm()));
3124  return;
3125  }
3126 
3127  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3128  addExpr(Inst, Memory.OffsetImm);
3129  }
3130 
3131  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3132  assert(N == 2 && "Invalid number of operands!");
3133  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3134  if (!Memory.OffsetImm)
3136  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3137  // The lower two bits are always zero and as such are not encoded.
3138  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3139  else
3140  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3141  }
3142 
  // Add a base register + unscaled immediate offset memory operand.
  void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  // Add a base register + vector (Q) offset register memory operand.
  void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
3154 
3155  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3156  assert(N == 2 && "Invalid number of operands!");
3157  // If this is an immediate, it's a label reference.
3158  if (isImm()) {
3159  addExpr(Inst, getImm());
3161  return;
3162  }
3163 
3164  // Otherwise, it's a normal memory reg+offset.
3165  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3166  addExpr(Inst, Memory.OffsetImm);
3167  }
3168 
3169  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3170  assert(N == 2 && "Invalid number of operands!");
3171  // If this is an immediate, it's a label reference.
3172  if (isImm()) {
3173  addExpr(Inst, getImm());
3175  return;
3176  }
3177 
3178  // Otherwise, it's a normal memory reg+offset.
3179  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3180  addExpr(Inst, Memory.OffsetImm);
3181  }
3182 
  // Add the expression a constant pool entry will be created from.
  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is container for the immediate that we will create the constant
    // pool from
    addExpr(Inst, getConstantPoolImm());
  }

  // Add the base and index registers of a TBB (table branch byte) operand.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  // Add the base and index registers of a TBH (table branch halfword)
  // operand.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
3201 
3202  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3203  assert(N == 3 && "Invalid number of operands!");
3204  unsigned Val =
3206  Memory.ShiftImm, Memory.ShiftType);
3207  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3208  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3209  Inst.addOperand(MCOperand::createImm(Val));
3210  }
3211 
  // Add a Thumb2 register-offset memory operand: base reg, offset reg,
  // and the left-shift amount applied to the offset register.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
  }

  // Add a Thumb reg+reg memory operand (no shift, no immediate).
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
3224 
3225  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3226  assert(N == 2 && "Invalid number of operands!");
3227  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3228  if (!Memory.OffsetImm)
3230  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3231  // The lower two bits are always zero and as such are not encoded.
3232  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3233  else
3234  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3235  }
3236 
3237  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3238  assert(N == 2 && "Invalid number of operands!");
3239  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3240  if (!Memory.OffsetImm)
3242  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3243  Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3244  else
3245  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3246  }
3247 
  // Add a Thumb base register + byte-scaled (unscaled) immediate offset.
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3253 
3254  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3255  assert(N == 2 && "Invalid number of operands!");
3256  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3257  if (!Memory.OffsetImm)
3259  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3260  // The lower two bits are always zero and as such are not encoded.
3261  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3262  else
3263  Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3264  }
3265 
  // Add a post-indexed 8-bit immediate: magnitude in the low bits, with
  // bit 8 holding the add (1) / subtract (0) flag.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is the #-0 marker: magnitude 0, subtract.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Same as addPostIdxImm8Operands but the immediate is scaled by 4.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is the #-0 marker: magnitude 0, subtract.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Add a post-indexed register: the register plus its add/subtract flag.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
  }

  // Add a post-indexed shifted register; sign, shift type and amount are
  // packed into one immediate via the AM2 encoding helpers.
  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Add a power-of-two immediate verbatim.
  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  // Add an MSR mask operand as its numeric encoding.
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
  }

  // Add a banked-register operand as its numeric encoding.
  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
  }

  // Add a processor interrupt-flags (CPS iflags) operand.
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
  }

  // Add a vector list operand as its (pseudo) base register.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
  }
3331 
  // Translate an MVE q-register range into the matching MQQPR/MQQQQPR
  // super-register operand.
  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // When we come here, the VectorList field will identify a range
    // of q-registers by its base register and length, and it will
    // have already been error-checked to be the expected length of
    // range and contain only q-regs in the range q0-q7. So we can
    // count on the base register being in the range q0-q6 (for 2
    // regs) or q0-q4 (for 4)
    //
    // The MVE instructions taking a register range of this kind will
    // need an operand in the MQQPR or MQQQQPR class, representing the
    // entire range as a unit. So we must translate into that class,
    // by finding the index of the base register in the MQPR reg
    // class, and returning the super-register at the corresponding
    // index in the target class.

    const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
    const MCRegisterClass *RC_out =
        (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
                                : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];

    // Find the index of the base register within MQPR; the classes are
    // laid out so the same index selects the covering super-register.
    unsigned I, E = RC_out->getNumRegs();
    for (I = 0; I < E; I++)
      if (RC_in->getRegister(I) == VectorList.RegNum)
        break;
    assert(I < E && "Invalid vector list start register!");

    Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
  }

  // Add an indexed vector list: the list register plus the lane index.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
  }
3368 
  // The six adders below all emit the parsed vector lane index verbatim;
  // the distinct names let the generated matcher apply per-element-size
  // range checks.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3398 
  // Add an i8 NEON splat immediate; 0xe00 is the cmode marker for an i8
  // splat (see the cmode comment in addNEONi8ReplicateOperands).
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
  }
3406 
3407  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3408  assert(N == 1 && "Invalid number of operands!");
3409  // The immediate encodes the type of constant as well as the value.
3410  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3411  unsigned Value = CE->getValue();
3414  }
3415 
3416  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3417  assert(N == 1 && "Invalid number of operands!");
3418  // The immediate encodes the type of constant as well as the value.
3419  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3420  unsigned Value = CE->getValue();
3421  Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3423  }
3424 
3425  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3426  assert(N == 1 && "Invalid number of operands!");
3427  // The immediate encodes the type of constant as well as the value.
3428  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3429  unsigned Value = CE->getValue();
3432  }
3433 
3434  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3435  assert(N == 1 && "Invalid number of operands!");
3436  // The immediate encodes the type of constant as well as the value.
3437  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3438  unsigned Value = CE->getValue();
3441  }
3442 
3443  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3444  // The immediate encodes the type of constant as well as the value.
3445  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3446  assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3447  Inst.getOpcode() == ARM::VMOVv16i8) &&
3448  "All instructions that wants to replicate non-zero byte "
3449  "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3450  unsigned Value = CE->getValue();
3451  if (Inv)
3452  Value = ~Value;
3453  unsigned B = Value & 0xff;
3454  B |= 0xe00; // cmode = 0b1110
3456  }
3457 
  // Inverted variant: replicate the bitwise NOT of the byte.
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }
3462 
3463  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3464  if (Value >= 256 && Value <= 0xffff)
3465  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3466  else if (Value > 0xffff && Value <= 0xffffff)
3467  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3468  else if (Value > 0xffffff)
3469  Value = (Value >> 24) | 0x600;
3470  return Value;
3471  }
3472 
3473  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3474  assert(N == 1 && "Invalid number of operands!");
3475  // The immediate encodes the type of constant as well as the value.
3476  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3477  unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3479  }
3480 
  // Non-inverted variant: replicate the byte as written.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }
3485 
  // Add a replicated-halfword V{MOV,MVN} immediate: the low 16 bits of
  // the constant, folded into (byte | cmode) form when it exceeds a byte.
  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
            Inst.getOpcode() == ARM::VMOVv8i16 ||
            Inst.getOpcode() == ARM::VMVNv4i16 ||
            Inst.getOpcode() == ARM::VMVNv8i16) &&
           "All instructions that want to replicate non-zero half-word "
           "always must be replaced with V{MOV,MVN}v{4,8}i16.");
    uint64_t Value = CE->getValue();
    unsigned Elem = Value & 0xffff;
    // Values needing the high byte get cmode marker 0x200.
    if (Elem >= 256)
      Elem = (Elem >> 8) | 0x200;
    Inst.addOperand(MCOperand::createImm(Elem));
  }
3501 
3502  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3503  assert(N == 1 && "Invalid number of operands!");
3504  // The immediate encodes the type of constant as well as the value.
3505  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3506  unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3508  }
3509 
  // Add a replicated-word V{MOV,MVN} immediate in modified-immediate form.
  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
            Inst.getOpcode() == ARM::VMOVv4i32 ||
            Inst.getOpcode() == ARM::VMVNv2i32 ||
            Inst.getOpcode() == ARM::VMVNv4i32) &&
           "All instructions that want to replicate non-zero word "
           "always must be replaced with V{MOV,MVN}v{2,4}i32.");
    uint64_t Value = CE->getValue();
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    Inst.addOperand(MCOperand::createImm(Elem));
  }

  // Add an i64 NEON splat immediate: each byte of the constant collapses
  // to one bit (bit i set iff byte i is non-zero, i.e. its low bit, since
  // each byte is all-ones or all-zeros); 0x1e00 is the i64 cmode marker.
  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
  }
3535 
  // Add an even complex rotation (0/90/180/270 degrees), encoded as
  // rotation/90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
  }

  // Add an odd complex rotation (90/270 degrees), encoded as
  // (rotation-90)/180, i.e. 0 or 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
  }

  // Add an MVE saturate width: 48 encodes as 1, 64 as 0.
  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
    Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
  }
3555 
  // Debug dump of this operand; defined out of line.
  void print(raw_ostream &OS) const override;

  // The Create* factories below each build an ARMOperand of one Kind,
  // fill in its payload, and record the source range [S, E] (EndLoc
  // defaults to StartLoc for single-token operands).

  // Operand for an IT-block condition mask.
  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand for an ARM condition code (eq, ne, ...).
  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
                                                    SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand for an MVE VPT predicate code.
  static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
                                                   SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_VPTPred);
    Op->VCC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand for a coprocessor number (pN).
  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand for a coprocessor register (cN).
  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand for a coprocessor option ({...}).
  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
                                                        SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Operand for the optional condition-code-setting register (CPSR or 0).
  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand holding a raw token; points into the source buffer.
  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Operand for a plain register.
  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
                                               SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Operand for a register shifted by a register or immediate amount.
  static std::unique_ptr<ARMOperand>
  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
                        SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
3648 
3649  static std::unique_ptr<ARMOperand>
3650  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3651  unsigned ShiftImm, SMLoc S, SMLoc E) {
3652  auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3653  Op->RegShiftedImm.ShiftTy = ShTy;
3654  Op->RegShiftedImm.SrcReg = SrcReg;
3655  Op->RegShiftedImm.ShiftImm = ShiftImm;
3656  Op->StartLoc = S;
3657  Op->EndLoc = E;
3658  return Op;
3659  }
3660 
3661  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3662  SMLoc S, SMLoc E) {
3663  auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3664  Op->ShifterImm.isASR = isASR;
3665  Op->ShifterImm.Imm = Imm;
3666  Op->StartLoc = S;
3667  Op->EndLoc = E;
3668  return Op;
3669  }
3670 
3671  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3672  SMLoc E) {
3673  auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3674  Op->RotImm.Imm = Imm;
3675  Op->StartLoc = S;
3676  Op->EndLoc = E;
3677  return Op;
3678  }
3679 
3680  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3681  SMLoc S, SMLoc E) {
3682  auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3683  Op->ModImm.Bits = Bits;
3684  Op->ModImm.Rot = Rot;
3685  Op->StartLoc = S;
3686  Op->EndLoc = E;
3687  return Op;
3688  }
3689 
3690  static std::unique_ptr<ARMOperand>
3691  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3692  auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3693  Op->Imm.Val = Val;
3694  Op->StartLoc = S;
3695  Op->EndLoc = E;
3696  return Op;
3697  }
3698 
3699  static std::unique_ptr<ARMOperand>
3700  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3701  auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3702  Op->Bitfield.LSB = LSB;
3703  Op->Bitfield.Width = Width;
3704  Op->StartLoc = S;
3705  Op->EndLoc = E;
3706  return Op;
3707  }
3708 
3709  static std::unique_ptr<ARMOperand>
3710  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3711  SMLoc StartLoc, SMLoc EndLoc) {
3712  assert(Regs.size() > 0 && "RegList contains no registers?");
3713  KindTy Kind = k_RegisterList;
3714 
3715  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3716  Regs.front().second)) {
3717  if (Regs.back().second == ARM::VPR)
3718  Kind = k_FPDRegisterListWithVPR;
3719  else
3720  Kind = k_DPRRegisterList;
3721  } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3722  Regs.front().second)) {
3723  if (Regs.back().second == ARM::VPR)
3724  Kind = k_FPSRegisterListWithVPR;
3725  else
3726  Kind = k_SPRRegisterList;
3727  }
3728 
3729  if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3730  Kind = k_RegisterListWithAPSR;
3731 
3732  assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3733 
3734  auto Op = std::make_unique<ARMOperand>(Kind);
3735  for (const auto &P : Regs)
3736  Op->Registers.push_back(P.second);
3737 
3738  Op->StartLoc = StartLoc;
3739  Op->EndLoc = EndLoc;
3740  return Op;
3741  }
3742 
3743  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3744  unsigned Count,
3745  bool isDoubleSpaced,
3746  SMLoc S, SMLoc E) {
3747  auto Op = std::make_unique<ARMOperand>(k_VectorList);
3748  Op->VectorList.RegNum = RegNum;
3749  Op->VectorList.Count = Count;
3750  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3751  Op->StartLoc = S;
3752  Op->EndLoc = E;
3753  return Op;
3754  }
3755 
3756  static std::unique_ptr<ARMOperand>
3757  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3758  SMLoc S, SMLoc E) {
3759  auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3760  Op->VectorList.RegNum = RegNum;
3761  Op->VectorList.Count = Count;
3762  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3763  Op->StartLoc = S;
3764  Op->EndLoc = E;
3765  return Op;
3766  }
3767 
3768  static std::unique_ptr<ARMOperand>
3769  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3770  bool isDoubleSpaced, SMLoc S, SMLoc E) {
3771  auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3772  Op->VectorList.RegNum = RegNum;
3773  Op->VectorList.Count = Count;
3774  Op->VectorList.LaneIndex = Index;
3775  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3776  Op->StartLoc = S;
3777  Op->EndLoc = E;
3778  return Op;
3779  }
3780 
3781  static std::unique_ptr<ARMOperand>
3782  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3783  auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3784  Op->VectorIndex.Val = Idx;
3785  Op->StartLoc = S;
3786  Op->EndLoc = E;
3787  return Op;
3788  }
3789 
3790  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3791  SMLoc E) {
3792  auto Op = std::make_unique<ARMOperand>(k_Immediate);
3793  Op->Imm.Val = Val;
3794  Op->StartLoc = S;
3795  Op->EndLoc = E;
3796  return Op;
3797  }
3798 
3799  static std::unique_ptr<ARMOperand>
3800  CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3801  ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3802  bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3803  auto Op = std::make_unique<ARMOperand>(k_Memory);
3804  Op->Memory.BaseRegNum = BaseRegNum;
3805  Op->Memory.OffsetImm = OffsetImm;
3806  Op->Memory.OffsetRegNum = OffsetRegNum;
3807  Op->Memory.ShiftType = ShiftType;
3808  Op->Memory.ShiftImm = ShiftImm;
3809  Op->Memory.Alignment = Alignment;
3810  Op->Memory.isNegative = isNegative;
3811  Op->StartLoc = S;
3812  Op->EndLoc = E;
3813  Op->AlignmentLoc = AlignmentLoc;
3814  return Op;
3815  }
3816 
3817  static std::unique_ptr<ARMOperand>
3818  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3819  unsigned ShiftImm, SMLoc S, SMLoc E) {
3820  auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3821  Op->PostIdxReg.RegNum = RegNum;
3822  Op->PostIdxReg.isAdd = isAdd;
3823  Op->PostIdxReg.ShiftTy = ShiftTy;
3824  Op->PostIdxReg.ShiftImm = ShiftImm;
3825  Op->StartLoc = S;
3826  Op->EndLoc = E;
3827  return Op;
3828  }
3829 
3830  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3831  SMLoc S) {
3832  auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3833  Op->MBOpt.Val = Opt;
3834  Op->StartLoc = S;
3835  Op->EndLoc = S;
3836  return Op;
3837  }
3838 
3839  static std::unique_ptr<ARMOperand>
3840  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3841  auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3842  Op->ISBOpt.Val = Opt;
3843  Op->StartLoc = S;
3844  Op->EndLoc = S;
3845  return Op;
3846  }
3847 
3848  static std::unique_ptr<ARMOperand>
3849  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3850  auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3851  Op->TSBOpt.Val = Opt;
3852  Op->StartLoc = S;
3853  Op->EndLoc = S;
3854  return Op;
3855  }
3856 
3857  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3858  SMLoc S) {
3859  auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3860  Op->IFlags.Val = IFlags;
3861  Op->StartLoc = S;
3862  Op->EndLoc = S;
3863  return Op;
3864  }
3865 
3866  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3867  auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3868  Op->MMask.Val = MMask;
3869  Op->StartLoc = S;
3870  Op->EndLoc = S;
3871  return Op;
3872  }
3873 
3874  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3875  auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3876  Op->BankedReg.Val = Reg;
3877  Op->StartLoc = S;
3878  Op->EndLoc = S;
3879  return Op;
3880  }
3881 };
3882 
3883 } // end anonymous namespace.
3884 
3885 void ARMOperand::print(raw_ostream &OS) const {
3886  auto RegName = [](unsigned Reg) {
3887  if (Reg)
3889  else
3890  return "noreg";
3891  };
3892 
3893  switch (Kind) {
3894  case k_CondCode:
3895  OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3896  break;
3897  case k_VPTPred:
3898  OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3899  break;
3900  case k_CCOut:
3901  OS << "<ccout " << RegName(getReg()) << ">";
3902  break;
3903  case k_ITCondMask: {
3904  static const char *const MaskStr[] = {
3905  "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3906  "(tt)", "(ttet)", "(tte)", "(ttee)",
3907  "(t)", "(tett)", "(tet)", "(tete)",
3908  "(te)", "(teet)", "(tee)", "(teee)",
3909  };
3910  assert((ITMask.Mask & 0xf) == ITMask.Mask);
3911  OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3912  break;
3913  }
3914  case k_CoprocNum:
3915  OS << "<coprocessor number: " << getCoproc() << ">";
3916  break;
3917  case k_CoprocReg:
3918  OS << "<coprocessor register: " << getCoproc() << ">";
3919  break;
3920  case k_CoprocOption:
3921  OS << "<coprocessor option: " << CoprocOption.Val << ">";
3922  break;
3923  case k_MSRMask:
3924  OS << "<mask: " << getMSRMask() << ">";
3925  break;
3926  case k_BankedReg:
3927  OS << "<banked reg: " << getBankedReg() << ">";
3928  break;
3929  case k_Immediate:
3930  OS << *getImm();
3931  break;
3932  case k_MemBarrierOpt:
3933  OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3934  break;
3935  case k_InstSyncBarrierOpt:
3936  OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3937  break;
3938  case k_TraceSyncBarrierOpt:
3939  OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3940  break;
3941  case k_Memory:
3942  OS << "<memory";
3943  if (Memory.BaseRegNum)
3944  OS << " base:" << RegName(Memory.BaseRegNum);
3945  if (Memory.OffsetImm)
3946  OS << " offset-imm:" << *Memory.OffsetImm;
3947  if (Memory.OffsetRegNum)
3948  OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3949  << RegName(Memory.OffsetRegNum);
3950  if (Memory.ShiftType != ARM_AM::no_shift) {
3951  OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3952  OS << " shift-imm:" << Memory.ShiftImm;
3953  }
3954  if (Memory.Alignment)
3955  OS << " alignment:" << Memory.Alignment;
3956  OS << ">";
3957  break;
3958  case k_PostIndexRegister:
3959  OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3960  << RegName(PostIdxReg.RegNum);
3961  if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3962  OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3963  << PostIdxReg.ShiftImm;
3964  OS << ">";
3965  break;
3966  case k_ProcIFlags: {
3967  OS << "<ARM_PROC::";
3968  unsigned IFlags = getProcIFlags();
3969  for (int i=2; i >= 0; --i)
3970  if (IFlags & (1 << i))
3971  OS << ARM_PROC::IFlagsToString(1 << i);
3972  OS << ">";
3973  break;
3974  }
3975  case k_Register:
3976  OS << "<register " << RegName(getReg()) << ">";
3977  break;
3978  case k_ShifterImmediate:
3979  OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3980  << " #" << ShifterImm.Imm << ">";
3981  break;
3982  case k_ShiftedRegister:
3983  OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3984  << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3985  << RegName(RegShiftedReg.ShiftReg) << ">";
3986  break;
3987  case k_ShiftedImmediate:
3988  OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3989  << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3990  << RegShiftedImm.ShiftImm << ">";
3991  break;
3992  case k_RotateImmediate:
3993  OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3994  break;
3995  case k_ModifiedImmediate:
3996  OS << "<mod_imm #" << ModImm.Bits << ", #"
3997  << ModImm.Rot << ")>";
3998  break;
3999  case k_ConstantPoolImmediate:
4000  OS << "<constant_pool_imm #" << *getConstantPoolImm();
4001  break;
4002  case k_BitfieldDescriptor:
4003  OS << "<bitfield " << "lsb: " << Bitfield.LSB
4004  << ", width: " << Bitfield.Width << ">";
4005  break;
4006  case k_RegisterList:
4007  case k_RegisterListWithAPSR:
4008  case k_DPRRegisterList:
4009  case k_SPRRegisterList:
4010  case k_FPSRegisterListWithVPR:
4011  case k_FPDRegisterListWithVPR: {
4012  OS << "<register_list ";
4013 
4014  const SmallVectorImpl<unsigned> &RegList = getRegList();
4016  I = RegList.begin(), E = RegList.end(); I != E; ) {
4017  OS << RegName(*I);
4018  if (++I < E) OS << ", ";
4019  }
4020 
4021  OS << ">";
4022  break;
4023  }
4024  case k_VectorList:
4025  OS << "<vector_list " << VectorList.Count << " * "
4026  << RegName(VectorList.RegNum) << ">";
4027  break;
4028  case k_VectorListAllLanes:
4029  OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4030  << RegName(VectorList.RegNum) << ">";
4031  break;
4032  case k_VectorListIndexed:
4033  OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4034  << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4035  break;
4036  case k_Token:
4037  OS << "'" << getToken() << "'";
4038  break;
4039  case k_VectorIndex:
4040  OS << "<vectorindex " << getVectorIndex() << ">";
4041  break;
4042  }
4043 }
4044 
4045 /// @name Auto-generated Match Functions
4046 /// {
4047 
4048 static unsigned MatchRegisterName(StringRef Name);
4049 
4050 /// }
4051 
4052 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
4053  SMLoc &StartLoc, SMLoc &EndLoc) {
4054  const AsmToken &Tok = getParser().getTok();
4055  StartLoc = Tok.getLoc();
4056  EndLoc = Tok.getEndLoc();
4057  RegNo = tryParseRegister();
4058 
4059  return (RegNo == (unsigned)-1);
4060 }
4061 
4062 OperandMatchResultTy ARMAsmParser::tryParseRegister(unsigned &RegNo,
4063  SMLoc &StartLoc,
4064  SMLoc &EndLoc) {
4065  if (ParseRegister(RegNo, StartLoc, EndLoc))
4066  return MatchOperand_NoMatch;
4067  return MatchOperand_Success;
4068 }
4069 
4070 /// Try to parse a register name. The token must be an Identifier when called,
4071 /// and if it is a register name the token is eaten and the register number is
4072 /// returned. Otherwise return -1.
int ARMAsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Only identifier tokens can name a register.
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case-insensitive; match on the lower-cased spelling.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Not a canonical name; try the alternate/GAS-compatible spellings.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid
  if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
    return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
4124 
4125 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4126 // If a recoverable error occurs, return 1. If an irrecoverable error
4127 // occurs, return -1. An irrecoverable error is one where tokens have been
4128 // consumed in the process of trying to parse the shifter (i.e., when it is
4129 // indeed a shifter operand, but malformed).
4130 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4131  MCAsmParser &Parser = getParser();
4132  SMLoc S = Parser.getTok().getLoc();
4133  const AsmToken &Tok = Parser.getTok();
4134  if (Tok.isNot(AsmToken::Identifier))
4135  return -1;
4136 
4137  std::string lowerCase = Tok.getString().lower();
4139  .Case("asl", ARM_AM::lsl)
4140  .Case("lsl", ARM_AM::lsl)
4141  .Case("lsr", ARM_AM::lsr)
4142  .Case("asr", ARM_AM::asr)
4143  .Case("ror", ARM_AM::ror)
4144  .Case("rrx", ARM_AM::rrx)
4146 
4147  if (ShiftTy == ARM_AM::no_shift)
4148  return 1;
4149 
4150  Parser.Lex(); // Eat the operator.
4151 
4152  // The source register for the shift has already been added to the
4153  // operand list, so we need to pop it off and combine it into the shifted
4154  // register operand instead.
4155  std::unique_ptr<ARMOperand> PrevOp(
4156  (ARMOperand *)Operands.pop_back_val().release());
4157  if (!PrevOp->isReg())
4158  return Error(PrevOp->getStartLoc(), "shift must be of a register");
4159  int SrcReg = PrevOp->getReg();
4160 
4161  SMLoc EndLoc;
4162  int64_t Imm = 0;
4163  int ShiftReg = 0;
4164  if (ShiftTy == ARM_AM::rrx) {
4165  // RRX Doesn't have an explicit shift amount. The encoder expects
4166  // the shift register to be the same as the source register. Seems odd,
4167  // but OK.
4168  ShiftReg = SrcReg;
4169  } else {
4170  // Figure out if this is shifted by a constant or a register (for non-RRX).
4171  if (Parser.getTok().is(AsmToken::Hash) ||
4172  Parser.getTok().is(AsmToken::Dollar)) {
4173  Parser.Lex(); // Eat hash.
4174  SMLoc ImmLoc = Parser.getTok().getLoc();
4175  const MCExpr *ShiftExpr = nullptr;
4176  if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4177  Error(ImmLoc, "invalid immediate shift value");
4178  return -1;
4179  }
4180  // The expression must be evaluatable as an immediate.
4181  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4182  if (!CE) {
4183  Error(ImmLoc, "invalid immediate shift value");
4184  return -1;
4185  }
4186  // Range check the immediate.
4187  // lsl, ror: 0 <= imm <= 31
4188  // lsr, asr: 0 <= imm <= 32
4189  Imm = CE->getValue();
4190  if (Imm < 0 ||
4191  ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4192  ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4193  Error(ImmLoc, "immediate shift value out of range");
4194  return -1;
4195  }
4196  // shift by zero is a nop. Always send it through as lsl.
4197  // ('as' compatibility)
4198  if (Imm == 0)
4199  ShiftTy = ARM_AM::lsl;
4200  } else if (Parser.getTok().is(AsmToken::Identifier)) {
4201  SMLoc L = Parser.getTok().getLoc();
4202  EndLoc = Parser.getTok().getEndLoc();
4203  ShiftReg = tryParseRegister();
4204  if (ShiftReg == -1) {
4205  Error(L, "expected immediate or register in shift operand");
4206  return -1;
4207  }
4208  } else {
4209  Error(Parser.getTok().getLoc(),
4210  "expected immediate or register in shift operand");
4211  return -1;
4212  }
4213  }
4214 
4215  if (ShiftReg && ShiftTy != ARM_AM::rrx)
4216  Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4217  ShiftReg, Imm,
4218  S, EndLoc));
4219  else
4220  Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4221  S, EndLoc));
4222 
4223  return 0;
4224 }
4225 
4226 /// Try to parse a register name. The token must be an Identifier when called.
4227 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4228 /// if there is a "writeback". 'true' if it's not a register.
4229 ///
4230 /// TODO this is likely to change to allow different register types and or to
4231 /// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc RegStartLoc = Parser.getTok().getLoc();
  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true; // Not a register: report failure, no tokens consumed.

  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));

  // A trailing '!' indicates writeback; it is kept as a separate token
  // operand so the matcher can distinguish writeback forms.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return true;
    // The lane index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");

    SMLoc E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}
4277 
4278 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
4279 /// instruction with a symbolic operand name.
4280 /// We accept "crN" syntax for GAS compatibility.
4281 /// <operand-name> ::= <prefix><number>
4282 /// If CoprocOp is 'c', then:
4283 /// <prefix> ::= c | cr
4284 /// If CoprocOp is 'p', then :
4285 /// <prefix> ::= p
4286 /// <number> ::= integer in range [0, 15]
4287 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4288  // Use the same layout as the tablegen'erated register name matcher. Ugly,
4289  // but efficient.
4290  if (Name.size() < 2 || Name[0] != CoprocOp)
4291  return -1;
4292  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4293 
4294  switch (Name.size()) {
4295  default: return -1;
4296  case 1:
4297  switch (Name[0]) {
4298  default: return -1;
4299  case '0': return 0;
4300  case '1': return 1;
4301  case '2': return 2;
4302  case '3': return 3;
4303  case '4': return 4;
4304  case '5': return 5;
4305  case '6': return 6;
4306  case '7': return 7;
4307  case '8': return 8;
4308  case '9': return 9;
4309  }
4310  case 2:
4311  if (Name[0] != '1')
4312  return -1;
4313  switch (Name[1]) {
4314  default: return -1;
4315  // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4316  // However, old cores (v5/v6) did use them in that way.
4317  case '0': return 10;
4318  case '1': return 11;
4319  case '2': return 12;
4320  case '3': return 13;
4321  case '4': return 14;
4322  case '5': return 15;
4323  }
4324  }
4325 }
4326 
4327 /// parseITCondCode - Try to parse a condition code for an IT instruction.
4329 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4330  MCAsmParser &Parser = getParser();
4331  SMLoc S = Parser.getTok().getLoc();
4332  const AsmToken &Tok = Parser.getTok();
4333  if (!Tok.is(AsmToken::Identifier))
4334  return MatchOperand_NoMatch;
4335  unsigned CC = ARMCondCodeFromString(Tok.getString());
4336  if (CC == ~0U)
4337  return MatchOperand_NoMatch;
4338  Parser.Lex(); // Eat the token.
4339 
4340  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4341 
4342  return MatchOperand_Success;
4343 }
4344 
4345 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4346 /// token must be an Identifier when called, and if it is a coprocessor
4347 /// number, the token is eaten and the operand is added to the operand list.
4349 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4350  MCAsmParser &Parser = getParser();
4351  SMLoc S = Parser.getTok().getLoc();
4352  const AsmToken &Tok = Parser.getTok();
4353  if (Tok.isNot(AsmToken::Identifier))
4354  return MatchOperand_NoMatch;
4355 
4356  int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4357  if (Num == -1)
4358  return MatchOperand_NoMatch;
4359  if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4360  return MatchOperand_NoMatch;
4361 
4362  Parser.Lex(); // Eat identifier token.
4363  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4364  return MatchOperand_Success;
4365 }
4366 
4367 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4368 /// token must be an Identifier when called, and if it is a coprocessor
4369 /// number, the token is eaten and the operand is added to the operand list.
4371 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4372  MCAsmParser &Parser = getParser();
4373  SMLoc S = Parser.getTok().getLoc();
4374  const AsmToken &Tok = Parser.getTok();
4375  if (Tok.isNot(AsmToken::Identifier))
4376  return MatchOperand_NoMatch;
4377 
4378  int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4379  if (Reg == -1)
4380  return MatchOperand_NoMatch;
4381 
4382  Parser.Lex(); // Eat identifier token.
4383  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4384  return MatchOperand_Success;
4385 }
4386 
4387 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4388 /// coproc_option : '{' imm0_255 '}'
4390 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4391  MCAsmParser &Parser = getParser();
4392  SMLoc S = Parser.getTok().getLoc();
4393 
4394  // If this isn't a '{', this isn't a coprocessor immediate operand.
4395  if (Parser.getTok().isNot(AsmToken::LCurly))
4396  return MatchOperand_NoMatch;
4397  Parser.Lex(); // Eat the '{'
4398 
4399  const MCExpr *Expr;
4400  SMLoc Loc = Parser.getTok().getLoc();
4401  if (getParser().parseExpression(Expr)) {
4402  Error(Loc, "illegal expression");
4403  return MatchOperand_ParseFail;
4404  }
4405  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4406  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
4407  Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
4408  return MatchOperand_ParseFail;
4409  }
4410  int Val = CE->getValue();
4411 
4412  // Check for and consume the closing '}'
4413  if (Parser.getTok().isNot(AsmToken::RCurly))
4414  return MatchOperand_ParseFail;
4415  SMLoc E = Parser.getTok().getEndLoc();
4416  Parser.Lex(); // Eat the '}'
4417 
4418  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4419  return MatchOperand_Success;
4420 }
4421 
// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // GPR successor table in encoding order: r0..r12, sp, lr, pc, then wrap
  // back to r0.
  switch(Reg) {
  default: llvm_unreachable("Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}
4443 
// Insert an <Encoding, Register> pair in an ordered vector. Return true on
// success, or false if a duplicate encoding was found.
static bool
insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                   unsigned Enc, unsigned Reg) {
  // Append the new pair at the end, then bubble it backwards (insertion
  // sort over reverse iterators) until it reaches its sorted position.
  Regs.emplace_back(Enc, Reg);
  for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
    if (J->first == Enc) {
      // Same encoding already present: J.base() is the forward iterator to
      // the just-inserted element, so erase it and report the duplicate.
      Regs.erase(J.base());
      return false;
    }
    if (J->first < Enc)
      break; // Found the insertion point; the list is sorted again.
    std::swap(*I, *J);
  }
  return true;
}
4461 
4462 /// Parse a register list.
4463 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4464  bool AllowRAAC) {
4465  MCAsmParser &Parser = getParser();
4466  if (Parser.getTok().isNot(AsmToken::LCurly))
4467  return TokError("Token is not a Left Curly Brace");
4468  SMLoc S = Parser.getTok().getLoc();
4469  Parser.Lex(); // Eat '{' token.
4470  SMLoc RegLoc = Parser.getTok().getLoc();
4471 
4472  // Check the first register in the list to see what register class
4473  // this is a list of.
4474  int Reg = tryParseRegister();
4475  if (Reg == -1)
4476  return Error(RegLoc, "register expected");
4477  if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4478  return Error(RegLoc, "pseudo-register not allowed");
4479  // The reglist instructions have at most 16 registers, so reserve
4480  // space for that many.
4481  int EReg = 0;
4483 
4484  // Allow Q regs and just interpret them as the two D sub-registers.
4485  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4486  Reg = getDRegFromQReg(Reg);
4487  EReg = MRI->getEncodingValue(Reg);
4488  Registers.emplace_back(EReg, Reg);
4489  ++Reg;
4490  }
4491  const MCRegisterClass *RC;
4492  if (Reg == ARM::RA_AUTH_CODE ||
4493  ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4494  RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4495  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4496  RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4497  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4498  RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4499  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4500  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4501  else
4502  return Error(RegLoc, "invalid register in register list");
4503 
4504  // Store the register.
4505  EReg = MRI->getEncodingValue(Reg);
4506  Registers.emplace_back(EReg, Reg);
4507 
4508  // This starts immediately after the first register token in the list,
4509  // so we can see either a comma or a minus (range separator) as a legal
4510  // next token.
4511  while (Parser.getTok().is(AsmToken::Comma) ||
4512  Parser.getTok().is(AsmToken::Minus)) {
4513  if (Parser.getTok().is(AsmToken::Minus)) {
4514  if (Reg == ARM::RA_AUTH_CODE)
4515  return Error(RegLoc, "pseudo-register not allowed");
4516  Parser.Lex(); // Eat the minus.
4517  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4518  int EndReg = tryParseRegister();
4519  if (EndReg == -1)
4520  return Error(AfterMinusLoc, "register expected");
4521  if (EndReg == ARM::RA_AUTH_CODE)
4522  return Error(AfterMinusLoc, "pseudo-register not allowed");
4523  // Allow Q regs and just interpret them as the two D sub-registers.
4524  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4525  EndReg = getDRegFromQReg(EndReg) + 1;
4526  // If the register is the same as the start reg, there's nothing
4527  // more to do.
4528  if (Reg == EndReg)
4529  continue;
4530  // The register must be in the same register class as the first.
4531  if (!RC->contains(Reg))
4532  return Error(AfterMinusLoc, "invalid register in register list");
4533  // Ranges must go from low to high.
4534  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4535  return Error(AfterMinusLoc, "bad range in register list");
4536 
4537  // Add all the registers in the range to the register list.
4538  while (Reg != EndReg) {
4539  Reg = getNextRegister(Reg);
4540  EReg = MRI->getEncodingValue(Reg);
4541  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4542  Warning(AfterMinusLoc, StringRef("duplicated register (") +
4544  ") in register list");
4545  }
4546  }
4547  continue;
4548  }
4549  Parser.Lex(); // Eat the comma.
4550  RegLoc = Parser.getTok().getLoc();
4551  int OldReg = Reg;
4552  const AsmToken RegTok = Parser.getTok();
4553  Reg = tryParseRegister();
4554  if (Reg == -1)
4555  return Error(RegLoc, "register expected");
4556  if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4557  return Error(RegLoc, "pseudo-register not allowed");
4558  // Allow Q regs and just interpret them as the two D sub-registers.
4559  bool isQReg = false;
4560  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4561  Reg = getDRegFromQReg(Reg);
4562  isQReg = true;
4563  }
4564  if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4565  RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4566  ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4567  // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4568  // subset of GPRRegClassId except it contains APSR as well.
4569  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4570  }
4571  if (Reg == ARM::VPR &&
4572  (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4573  RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4574  RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4575  RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4576  EReg = MRI->getEncodingValue(Reg);
4577  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4578  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4579  ") in register list");
4580  }
4581  continue;
4582  }
4583  // The register must be in the same register class as the first.
4584  if ((Reg == ARM::RA_AUTH_CODE &&
4585  RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4586  (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4587  return Error(RegLoc, "invalid register in register list");
4588  // In most cases, the list must be monotonically increasing. An
4589  // exception is CLRM, which is order-independent anyway, so
4590  // there's no potential for confusion if you write clrm {r2,r1}
4591  // instead of clrm {r1,r2}.
4592  if (EnforceOrder &&
4593  MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4594  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4595  Warning(RegLoc, "register list not in ascending order");
4596  else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4597  return Error(RegLoc, "register list not in ascending order");
4598  }
4599  // VFP register lists must also be contiguous.
4600  if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4601  RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4602  Reg != OldReg + 1)
4603  return Error(RegLoc, "non-contiguous register range");
4604  EReg = MRI->getEncodingValue(Reg);
4605  if (!insertNoDuplicates(Registers, EReg, Reg)) {
4606  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4607  ") in register list");
4608  }
4609  if (isQReg) {
4610  EReg = MRI->getEncodingValue(++Reg);
4611  Registers.emplace_back(EReg, Reg);
4612  }
4613  }
4614 
4615  if (Parser.getTok().isNot(AsmToken::RCurly))
4616  return Error(Parser.getTok().getLoc(), "'}' expected");
4617  SMLoc E = Parser.getTok().getEndLoc();
4618  Parser.Lex(); // Eat '}' token.
4619 
4620  // Push the register list operand.
4621  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4622 
4623  // The ARM system instruction variants for LDM/STM have a '^' token here.
4624  if (Parser.getTok().is(AsmToken::Caret)) {
4625  Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4626  Parser.Lex(); // Eat '^' token.
4627  }
4628 
4629  return false;
4630 }
4631 
4632 // Helper function to parse the lane index for vector lists.
4633 OperandMatchResultTy ARMAsmParser::
4634 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4635  MCAsmParser &Parser = getParser();
4636  Index = 0; // Always return a defined index value.
4637  if (Parser.getTok().is(AsmToken::LBrac)) {
4638  Parser.Lex(); // Eat the '['.
4639  if (Parser.getTok().is(AsmToken::RBrac)) {
4640  // "Dn[]" is the 'all lanes' syntax.
4641  LaneKind = AllLanes;
4642  EndLoc = Parser.getTok().getEndLoc();
4643  Parser.Lex(); // Eat the ']'.
4644  return MatchOperand_Success;
4645  }
4646 
4647  // There's an optional '#' token here. Normally there wouldn't be, but
4648  // inline assemble puts one in, and it's friendly to accept that.
4649  if (Parser.getTok().is(AsmToken::Hash))
4650  Parser.Lex(); // Eat '#' or '$'.
4651 
4652  const MCExpr *LaneIndex;
4653  SMLoc Loc = Parser.getTok().getLoc();
4654  if (getParser().parseExpression(LaneIndex)) {
4655  Error(Loc, "illegal expression");
4656  return MatchOperand_ParseFail;
4657  }
4658  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4659  if (!CE) {
4660  Error(Loc, "lane index must be empty or an integer");
4661  return MatchOperand_ParseFail;
4662  }
4663  if (Parser.getTok().isNot(AsmToken::RBrac)) {
4664  Error(Parser.getTok().getLoc(), "']' expected");
4665  return MatchOperand_ParseFail;
4666  }
4667  EndLoc = Parser.getTok().getEndLoc();
4668  Parser.Lex(); // Eat the ']'.
4669  int64_t Val = CE->getValue();
4670 
4671  // FIXME: Make this range check context sensitive for .8, .16, .32.
4672  if (Val < 0 || Val > 7) {
4673  Error(Parser.getTok().getLoc(), "lane index out of range");
4674  return MatchOperand_ParseFail;
4675  }
4676  Index = Val;
4677  LaneKind = IndexedLane;
4678  return MatchOperand_Success;
4679  }
4680  LaneKind = NoLanes;
4681  return MatchOperand_Success;
4682 }
4683 
4684 // parse a vector register list
4686 ARMAsmParser::parseVectorList(OperandVector &Operands) {
4687  MCAsmParser &Parser = getParser();
4688  VectorLaneTy LaneKind;
4689  unsigned LaneIndex;
4690  SMLoc S = Parser.getTok().getLoc();
4691  // As an extension (to match gas), support a plain D register or Q register
4692  // (without encosing curly braces) as a single or double entry list,
4693  // respectively.
4694  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4695  SMLoc E = Parser.getTok().getEndLoc();
4696  int Reg = tryParseRegister();
4697  if (Reg == -1)
4698  return MatchOperand_NoMatch;
4699  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4700  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4701  if (Res != MatchOperand_Success)
4702  return Res;
4703  switch (LaneKind) {
4704  case NoLanes:
4705  Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4706  break;
4707  case AllLanes:
4708  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4709  S, E));
4710  break;
4711  case IndexedLane:
4712  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4713  LaneIndex,
4714  false, S, E));
4715  break;
4716  }
4717  return MatchOperand_Success;
4718  }
4719  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4720  Reg = getDRegFromQReg(Reg);
4721  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4722  if (Res != MatchOperand_Success)
4723  return Res;
4724  switch (LaneKind) {
4725  case NoLanes:
4726  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4727  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4728  Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4729  break;
4730  case AllLanes:
4731  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4732  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4733  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4734  S, E));
4735  break;
4736  case IndexedLane:
4737  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4738  LaneIndex,
4739  false, S, E));
4740  break;
4741  }
4742  return MatchOperand_Success;
4743  }
4744  Error(S, "vector register expected");
4745  return MatchOperand_ParseFail;
4746  }
4747 
4748  if (Parser.getTok().isNot(AsmToken::LCurly))
4749  return MatchOperand_NoMatch;
4750 
4751  Parser.Lex(); // Eat '{' token.
4752  SMLoc RegLoc = Parser.getTok().getLoc();
4753 
4754  int Reg = tryParseRegister();
4755  if (Reg == -1) {
4756  Error(RegLoc, "register expected");
4757  return MatchOperand_ParseFail;
4758  }
4759  unsigned Count = 1;
4760  int Spacing = 0;
4761  unsigned FirstReg = Reg;
4762 
4763  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4764  Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
4765  return MatchOperand_ParseFail;
4766  }
4767  // The list is of D registers, but we also allow Q regs and just interpret
4768  // them as the two D sub-registers.
4769  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4770  FirstReg = Reg = getDRegFromQReg(Reg);
4771  Spacing = 1; // double-spacing requires explicit D registers, otherwise
4772  // it's ambiguous with four-register single spaced.
4773  ++Reg;
4774  ++Count;
4775  }
4776 
4777  SMLoc E;
4778  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4779  return MatchOperand_ParseFail;
4780 
4781  while (Parser.getTok().is(AsmToken::Comma) ||
4782  Parser.getTok().is(AsmToken::Minus)) {
4783  if (Parser.getTok().is(AsmToken::Minus)) {
4784  if (!Spacing)
4785  Spacing = 1; // Register range implies a single spaced list.
4786  else if (Spacing == 2) {
4787  Error(Parser.getTok().getLoc(),
4788  "sequential registers in double spaced list");
4789  return MatchOperand_ParseFail;
4790  }
4791  Parser.Lex(); // Eat the minus.
4792  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4793  int EndReg = tryParseRegister();
4794  if (EndReg == -1) {
4795  Error(AfterMinusLoc, "register expected");
4796  return MatchOperand_ParseFail;
4797  }
4798  // Allow Q regs and just interpret them as the two D sub-registers.
4799  if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4800  EndReg = getDRegFromQReg(EndReg) + 1;
4801  // If the register is the same as the start reg, there's nothing
4802  // more to do.
4803  if (Reg == EndReg)
4804  continue;
4805  // The register must be in the same register class as the first.
4806  if ((hasMVE() &&
4807  !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4808  (!hasMVE() &&
4809  !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
4810  Error(AfterMinusLoc, "invalid register in register list");
4811  return MatchOperand_ParseFail;
4812  }
4813  // Ranges must go from low to high.
4814  if (Reg > EndReg) {
4815  Error(AfterMinusLoc, "bad range in register list");
4816  return MatchOperand_ParseFail;
4817  }
4818  // Parse the lane specifier if present.
4819  VectorLaneTy NextLaneKind;
4820  unsigned NextLaneIndex;
4821  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4823  return MatchOperand_ParseFail;
4824  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4825  Error(AfterMinusLoc, "mismatched lane index in register list");
4826  return MatchOperand_ParseFail;
4827  }
4828 
4829  // Add all the registers in the range to the register list.
4830  Count += EndReg - Reg;
4831  Reg = EndReg;
4832  continue;
4833  }
4834  Parser.Lex(); // Eat the comma.
4835  RegLoc = Parser.getTok().getLoc();
4836  int OldReg = Reg;
4837  Reg = tryParseRegister();
4838  if (Reg == -1) {
4839  Error(RegLoc, "register expected");
4840  return MatchOperand_ParseFail;
4841  }
4842 
4843  if (hasMVE()) {
4844  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
4845  Error(RegLoc, "vector register in range Q0-Q7 expected");
4846  return MatchOperand_ParseFail;
4847  }
4848  Spacing = 1;
4849  }
4850  // vector register lists must be contiguous.
4851  // It's OK to use the enumeration values directly here rather, as the
4852  // VFP register classes have the enum sorted properly.
4853  //
4854  // The list is of D registers, but we also allow Q regs and just interpret
4855  // them as the two D sub-registers.
4856  else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4857  if (!Spacing)
4858  Spacing = 1; // Register range implies a single spaced list.
4859  else if (Spacing == 2) {
4860  Error(RegLoc,
4861  "invalid register in double-spaced list (must be 'D' register')");
4862  return MatchOperand_ParseFail;
4863  }
4864  Reg = getDRegFromQReg(Reg);
4865  if (Reg != OldReg + 1) {
4866  Error(RegLoc, "non-contiguous register range");
4867  return MatchOperand_ParseFail;
4868  }
4869  ++Reg;
4870  Count += 2;
4871  // Parse the lane specifier if present.
4872  VectorLaneTy NextLaneKind;
4873  unsigned NextLaneIndex;
4874  SMLoc LaneLoc = Parser.getTok().getLoc();
4875  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4877  return MatchOperand_ParseFail;
4878  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4879  Error(LaneLoc, "mismatched lane index in register list");
4880  return MatchOperand_ParseFail;
4881  }
4882  continue;
4883  }
4884  // Normal D register.
4885  // Figure out the register spacing (single or double) of the list if
4886  // we don't know it already.
4887  if (!Spacing)
4888  Spacing = 1 + (Reg == OldReg + 2);
4889 
4890  // Just check that it's contiguous and keep going.
4891  if (Reg != OldReg + Spacing) {
4892  Error(RegLoc, "non-contiguous register range");
4893  return MatchOperand_ParseFail;
4894  }
4895  ++Count;
4896  // Parse the lane specifier if present.
4897  VectorLaneTy NextLaneKind;
4898  unsigned NextLaneIndex;
4899  SMLoc EndLoc = Parser.getTok().getLoc();
4900  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4901  return MatchOperand_ParseFail;
4902  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4903  Error(EndLoc, "mismatched lane index in register list");
4904  return MatchOperand_ParseFail;
4905  }
4906  }
4907 
4908  if (Parser.getTok().isNot(AsmToken::RCurly)) {
4909  Error(Parser.getTok().getLoc(), "'}' expected");
4910  return MatchOperand_ParseFail;
4911  }
4912  E = Parser.getTok().getEndLoc();
4913  Parser.Lex(); // Eat '}' token.
4914 
4915  switch (LaneKind) {
4916  case NoLanes:
4917  case AllLanes: {
4918  // Two-register operands have been converted to the
4919  // composite register classes.
4920  if (Count == 2 && !hasMVE()) {
4921  const MCRegisterClass *RC = (Spacing == 1) ?
4922  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4923  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4924  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4925  }
4926  auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4927  ARMOperand::CreateVectorListAllLanes);
4928  Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4929  break;
4930  }
4931  case IndexedLane:
4932  Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4933  LaneIndex,
4934  (Spacing == 2),
4935  S, E));
4936  break;
4937  }
4938  return MatchOperand_Success;
4939 }
4940 
4941 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4943 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4944  MCAsmParser &Parser = getParser();
4945  SMLoc S = Parser.getTok().getLoc();
4946  const AsmToken &Tok = Parser.getTok();
4947  unsigned Opt;
4948 
4949  if (Tok.is(AsmToken::Identifier)) {
4950  StringRef OptStr = Tok.getString();
4951 
4952  Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4953  .Case("sy", ARM_MB::SY)
4954  .Case("st", ARM_MB::ST)
4955  .Case("ld", ARM_MB::LD)
4956  .Case("sh", ARM_MB::ISH)
4957  .Case("ish", ARM_MB::ISH)
4958  .Case("shst", ARM_MB::ISHST)
4959  .Case("ishst", ARM_MB::ISHST)
4960  .Case("ishld", ARM_MB::ISHLD)
4961  .Case("nsh", ARM_MB::NSH)
4962  .Case("un", ARM_MB::NSH)
4963  .Case("nshst", ARM_MB::NSHST)
4964  .Case("nshld", ARM_MB::NSHLD)
4965  .Case("unst", ARM_MB::NSHST)
4966  .Case("osh", ARM_MB::OSH)
4967  .Case("oshst", ARM_MB::OSHST)
4968  .Case("oshld", ARM_MB::OSHLD)